| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86–54.5k) | int64 (0–371) | string (lengths 87–49.2k) | int64 (0–349) | int64 (0–1) |
def odd_even_sort(input_list: list) -> list:
    """Sort the input list in place using odd-even transposition sort."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list
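
# Quick doctest-style sanity checks for odd_even_sort (illustrative values,
# not part of the original module):
# >>> odd_even_sort([5, 3, 1, 4, 2])
# [1, 2, 3, 4, 5]
# >>> odd_even_sort([])
# []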
if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]  # inputting elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
| code_codestyle: 235 |
"""simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
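
# These tests are decorated with @slow; under the transformers testing
# conventions they are typically enabled with the RUN_SLOW environment
# variable, e.g. (illustrative path):
#   RUN_SLOW=1 pytest examples/research_projects/deebert/test_glue_deebert.py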
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)

    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
        self.run_and_check(train_args)

        eval_args = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
        self.run_and_check(eval_args)

        entropy_eval_args = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
        self.run_and_check(entropy_eval_args)
| style_context_codestyle: 98 | label: 0 |
"""simple docstring"""
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
lowerCamelCase__ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self : Optional[int] , **__a : Any ) -> Any:
requires_backends(self , ["bs4"] )
super().__init__(**__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Optional[Any] ) -> Union[str, Any]:
_UpperCamelCase : int = []
_UpperCamelCase : Optional[int] = []
_UpperCamelCase : Any = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
_UpperCamelCase : Any = parent.find_all(child.name , recursive=__a )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(__a ) else next(i for i, s in enumerate(__a , 1 ) if s is child ) )
_UpperCamelCase : List[Any] = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : List[Any] ) -> List[Any]:
_UpperCamelCase : Any = BeautifulSoup(__a , "html.parser" )
_UpperCamelCase : Dict = []
_UpperCamelCase : Union[str, Any] = []
_UpperCamelCase : List[str] = []
for element in html_code.descendants:
if type(__a ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
_UpperCamelCase : List[str] = html.unescape(__a ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(__a )
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = self.xpath_soup(__a )
stringaxtag_seq.append(__a )
stringaxsubs_seq.append(__a )
if len(__a ) != len(__a ):
raise ValueError("Number of doc strings and xtags does not correspond" )
if len(__a ) != len(__a ):
raise ValueError("Number of doc strings and xsubs does not correspond" )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : Tuple , __a : int ) -> List[Any]:
_UpperCamelCase : int = ""
for tagname, subs in zip(__a , __a ):
xpath += F'''/{tagname}'''
if subs != 0:
xpath += F'''[{subs}]'''
return xpath
def __call__( self : List[str] , __a : str ) -> BatchFeature:
_UpperCamelCase : str = False
# Check that strings has a valid type
if isinstance(__a , __a ):
_UpperCamelCase : int = True
elif isinstance(__a , (list, tuple) ):
if len(__a ) == 0 or isinstance(html_strings[0] , __a ):
_UpperCamelCase : List[str] = True
if not valid_strings:
raise ValueError(
"HTML strings must of type `str`, `List[str]` (batch of examples), "
F'''but is of type {type(__a )}.''' )
_UpperCamelCase : Optional[Any] = bool(isinstance(__a , (list, tuple) ) and (isinstance(html_strings[0] , __a )) )
if not is_batched:
_UpperCamelCase : str = [html_strings]
# Get nodes + xpaths
_UpperCamelCase : Optional[int] = []
_UpperCamelCase : Any = []
for html_string in html_strings:
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : str = self.get_three_from_single(__a )
nodes.append(__a )
_UpperCamelCase : Any = []
for node, tag_list, sub_list in zip(__a , __a , __a ):
_UpperCamelCase : Dict = self.construct_xpath(__a , __a )
xpath_strings.append(__a )
xpaths.append(__a )
# return as Dict
_UpperCamelCase : Optional[Any] = {"nodes": nodes, "xpaths": xpaths}
_UpperCamelCase : Any = BatchFeature(data=__a , tensor_type=__a )
return encoded_inputs
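
# A minimal usage sketch (assuming bs4 is installed; the example HTML string
# is illustrative, not part of the original file):
#
#     feature_extractor = MarkupLMFeatureExtractor()
#     encoding = feature_extractor("<html><body><p>Hello world</p></body></html>")
#     encoding["nodes"]   # [['Hello world']]
#     encoding["xpaths"]  # [['/html/body/p']]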
| code_codestyle: 310 |
"""simple docstring"""
import inspect
import unittest

from transformers import ViTConfig
from transformers.testing_utils import (
    require_accelerate,
    require_torch,
    require_torch_gpu,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
    from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
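        # Worked example with the defaults above: image_size=30, patch_size=2
        # -> (30 // 2) ** 2 = 225 patches, so seq_length = 225 + 1 = 226.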
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)

        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
| style_context_codestyle: 310 | label: 1 |
import argparse

import requests
import torch

# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image

from transformers import (
    AutoTokenizer,
    BlipImageProcessor,
    InstructBlipConfig,
    InstructBlipForConditionalGeneration,
    InstructBlipProcessor,
    InstructBlipQFormerConfig,
    InstructBlipVisionConfig,
    LlamaConfig,
    LlamaTokenizerFast,
    T5Config,
    T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD


def load_demo_image():
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    return image


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config):
    rename_keys = []
    # fmt: off

    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias"))

    # fmt: on
    return rename_keys


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias


def get_blip2_config(model_name):
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32001).to_dict()
    else:
        raise ValueError("Model name not supported")

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)

    return config, image_size


@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    qformer_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", truncation_side="left")
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})

    if "t5" in model_name:
        tokenizer = T5TokenizerFast.from_pretrained("google/flan-t5-xl", truncation_side="left")
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b", truncation_side="left", bos_token="</s>", unk_token="</s>"
        )
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})

    config, image_size = get_blip2_config(model_name)

    hf_model = InstructBlipForConditionalGeneration(config).eval()

    model_name_to_original = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }

    name, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    lavis_device = "cuda:1" if torch.cuda.is_available() else "cpu"
    hf_model_device = "cuda:2" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=lavis_device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "llm_proj" in key:
            key = key.replace("llm_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("llm_model"):
            key = key.replace("llm_model", "language_model")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)

    image = load_demo_image()
    prompt = "What is unusual about this image?"

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = InstructBlipProcessor(
        image_processor=image_processor,
        tokenizer=tokenizer,
        qformer_tokenizer=qformer_tokenizer,
    )
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(hf_model_device)

    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)

    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [prompt]}).logits
            logits = hf_model(**inputs).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]}
            ).logits
            label_input_ids = tokenizer("\n", return_tensors="pt").input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels).logits

    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if "vicuna" in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print("Looks ok!")

    print("Generating with original model...")
    original_outputs = original_model.generate({"image": original_pixel_values, "prompt": prompt}, num_beams=5)

    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model...")
    outputs = hf_model.generate(
        **inputs,
        do_sample=False,
        num_beams=5,
        max_length=256,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.5,
        length_penalty=1.0,
        temperature=1,
    )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print("Original generation:", original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"Salesforce/{model_name}")
        hf_model.push_to_hub(f"Salesforce/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "instructblip-vicuna-7b",
        "instructblip-vicuna-13b",
        "instructblip-flan-t5-xl",
        "instructblip-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="instructblip-flan-t5-xl",
        choices=choices,
        type=str,
        help="Name of the model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )

    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
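
# Example invocation (illustrative; the script filename and dump path are assumed):
#   python convert_instructblip_original_to_pytorch.py \
#       --model_name instructblip-flan-t5-xl \
#       --pytorch_dump_folder_path ./instructblip-flan-t5-xl-hf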
| code_codestyle: 12 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
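
# Design note (an aside, not part of the original file): registering a
# _LazyModule in sys.modules means framework-specific submodules are only
# imported on first attribute access, so e.g.
#
#     from transformers.models.vision_encoder_decoder import VisionEncoderDecoderModel
#
# triggers the heavy torch-backed import only at that point.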
| style_context_codestyle: 12 | label: 1 |
"""simple docstring"""
def _snake_case ( UpperCAmelCase_ : int ):
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
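
# Quick doctest-style checks (illustrative values):
# >>> perfect(27)
# False
# >>> perfect(28)
# True
# >>> perfect(496)
# True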
if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
| code_codestyle: 350 |
"""simple docstring"""
import sys
from collections import defaultdict
class a :
"""simple docstring"""
def __init__( self: Union[str, Any] ):
"""simple docstring"""
A__ = []
def UpperCamelCase ( self: List[str] , UpperCamelCase: int ):
"""simple docstring"""
return self.node_position[vertex]
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: str ):
"""simple docstring"""
A__ = pos
def UpperCamelCase ( self: Dict , UpperCamelCase: List[Any] , UpperCamelCase: Any , UpperCamelCase: str , UpperCamelCase: List[str] ):
"""simple docstring"""
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
A__ = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
A__ = 2 * start + 1
else:
A__ = 2 * start + 2
if heap[smallest_child] < heap[start]:
A__ , A__ = heap[smallest_child], positions[smallest_child]
A__ , A__ = (
heap[start],
positions[start],
)
A__ , A__ = temp, tempa
A__ = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , UpperCamelCase )
self.top_to_bottom(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: Tuple , UpperCamelCase: Tuple , UpperCamelCase: Any , UpperCamelCase: Optional[Any] , UpperCamelCase: Any ):
"""simple docstring"""
A__ = position[index]
while index != 0:
A__ = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
A__ = heap[parent]
A__ = position[parent]
self.set_position(position[parent] , UpperCamelCase )
else:
A__ = val
A__ = temp
self.set_position(UpperCamelCase , UpperCamelCase )
break
A__ = parent
else:
A__ = val
A__ = temp
self.set_position(UpperCamelCase , 0 )
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: List[str] , UpperCamelCase: Optional[int] ):
"""simple docstring"""
A__ = len(UpperCamelCase ) // 2 - 1
for i in range(UpperCamelCase , -1 , -1 ):
self.top_to_bottom(UpperCamelCase , UpperCamelCase , len(UpperCamelCase ) , UpperCamelCase )
def UpperCamelCase ( self: Optional[int] , UpperCamelCase: str , UpperCamelCase: List[str] ):
"""simple docstring"""
A__ = positions[0]
A__ = sys.maxsize
self.top_to_bottom(UpperCamelCase , 0 , len(UpperCamelCase ) , UpperCamelCase )
return temp
def _snake_case ( UpperCAmelCase_ : Union[str, Any] ):
A__ = Heap()
A__ = [0] * len(UpperCAmelCase_ )
A__ = [-1] * len(UpperCAmelCase_ ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
A__ = [] # Heap of Distance of vertices from their neighboring vertex
A__ = []
for vertex in range(len(UpperCAmelCase_ ) ):
distance_tv.append(sys.maxsize )
positions.append(UpperCAmelCase_ )
heap.node_position.append(UpperCAmelCase_ )
A__ = []
A__ = 1
A__ = sys.maxsize
for neighbor, distance in adjacency_list[0]:
A__ = 0
A__ = distance
heap.heapify(UpperCAmelCase_ , UpperCAmelCase_ )
for _ in range(1 , len(UpperCAmelCase_ ) ):
A__ = heap.delete_minimum(UpperCAmelCase_ , UpperCAmelCase_ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
A__ = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(UpperCAmelCase_ )]
):
A__ = distance
heap.bottom_to_top(
UpperCAmelCase_ , heap.get_position(UpperCAmelCase_ ) , UpperCAmelCase_ , UpperCAmelCase_ )
A__ = vertex
return tree_edges
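
# Example on a small triangle graph (illustrative input; vertices are
# 0-indexed and each adjacency entry is [neighbor, weight]):
#
#     graph = {0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 2]], 2: [[0, 3], [1, 2]]}
#     prisms_algorithm(graph)  # [(0, 1), (1, 2)]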
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
| style_context_codestyle: 69 | label: 0 |
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    """Return True if the string is a dotted-quad IPv4 address with octets in 0-255."""
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)
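
# Illustrative checks:
# >>> is_ip_v4_address_valid("192.168.0.23")
# True
# >>> is_ip_v4_address_valid("192.256.15.8")
# False
# >>> is_ip_v4_address_valid("not.an.ip.address")
# False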
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
| code_codestyle: 95 |
def solution(max_perimeter: int = 10**9) -> int:
    """
    Sum the perimeters of all almost-equilateral triangles (side lengths
    a, a, a +/- 1) with integral sides and integral area, whose perimeter
    does not exceed `max_perimeter`.
    """
    prev_value = 1
    value = 2

    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum
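
# Illustrative check: below a perimeter of 100 the only such triangles are
# (5, 5, 6) with perimeter 16 and (17, 17, 16) with perimeter 50:
# >>> solution(100)
# 66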
if __name__ == "__main__":
    print(f"{solution() = }")
| style_context_codestyle: 341 | label: 0 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
    # used to compute the property `self.chunk_length`
    "EncodecConfig": ["overlap"],
    # used as `self.bert_model = BertModel(config, ...)`
    "DPRConfig": True,
    # not used in modeling files, but it's an important information
    "FSMTConfig": ["langs"],
    # used internally in the configuration class file
    "GPTNeoConfig": ["attention_types"],
    # used internally in the configuration class file
    "EsmConfig": ["is_folding_model"],
    # used during training (despite we don't have training script for these models yet)
    "Mask2FormerConfig": ["ignore_value"],
    # `ignore_value` used during training (despite we don't have training script for these models yet)
    # `norm` used in conversion script (despite not using in the modeling file)
    "OneFormerConfig": ["ignore_value", "norm"],
    # used during preprocessing and collation, see `collating_graphormer.py`
    "GraphormerConfig": ["spatial_pos_max"],
    # used internally in the configuration class file
    "T5Config": ["feed_forward_proj"],
    # used internally in the configuration class file
    # `tokenizer_class` get default value `T5Tokenizer` intentionally
    "MT5Config": ["feed_forward_proj", "tokenizer_class"],
    "UMT5Config": ["feed_forward_proj", "tokenizer_class"],
    # used internally in the configuration class file
    "LongT5Config": ["feed_forward_proj"],
    # used internally in the configuration class file
    "SwitchTransformersConfig": ["feed_forward_proj"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "BioGptConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "GLPNConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "SegformerConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "CvtConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "PerceiverConfig": ["layer_norm_eps"],
    # used internally to calculate the feature size
    "InformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate the feature size
    "TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate the feature size
    "AutoformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate `mlp_dim`
    "SamVisionConfig": ["mlp_ratio"],
    # For (head) training, but so far not implemented
    "ClapAudioConfig": ["num_classes"],
    # Not used, but providing useful information to users
    "SpeechT5HifiGanConfig": ["sampling_rate"],
}

# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
    {
        "CLIPSegConfig": True,
        "DeformableDetrConfig": True,
        "DetaConfig": True,
        "DinatConfig": True,
        "DonutSwinConfig": True,
        "EfficientFormerConfig": True,
        "FSMTConfig": True,
        "JukeboxConfig": True,
        "LayoutLMv2Config": True,
        "MaskFormerSwinConfig": True,
        "MT5Config": True,
        "NatConfig": True,
        "OneFormerConfig": True,
        "PerceiverConfig": True,
        "RagConfig": True,
        "SpeechT5Config": True,
        "SwinConfig": True,
        "Swin2SRConfig": True,
        "Swinv2Config": True,
        "SwitchTransformersConfig": True,
        "TableTransformerConfig": True,
        "TapasConfig": True,
        "TransfoXLConfig": True,
        "UniSpeechConfig": True,
        "UniSpeechSatConfig": True,
        "WavLMConfig": True,
        "WhisperConfig": True,
        # TODO: @Arthur (for `alignment_head` and `alignment_layer`)
        "JukeboxPriorConfig": True,
        # TODO: @Younes (for `is_decoder`)
        "Pix2StructTextConfig": True,
    }
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Check whether any name in `attributes` is used in one of the strings in `source_strings`."""
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True

            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    """Return the (sorted) list of `__init__` attributes of `config_class` not used in the modeling files."""
    # Get the parameters in `__init__` of the configuration class, and the default values if any
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    """Check that the arguments in `__init__` of all configuration classes are used in the modeling files."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"

        raise ValueError(error)


if __name__ == "__main__":
    check_config_attributes()
| code_codestyle: 354 |
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.metric_id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| style_context_codestyle: 330 | label: 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
    "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_transfo_xl"] = [
        "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AdaptiveEmbedding",
        "TransfoXLForSequenceClassification",
        "TransfoXLLMHeadModel",
        "TransfoXLModel",
        "TransfoXLPreTrainedModel",
        "load_tf_weights_in_transfo_xl",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
        "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFAdaptiveEmbedding",
        "TFTransfoXLForSequenceClassification",
        "TFTransfoXLLMHeadModel",
        "TFTransfoXLMainLayer",
        "TFTransfoXLModel",
        "TFTransfoXLPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
    from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_transfo_xl import (
            TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            AdaptiveEmbedding,
            TransfoXLForSequenceClassification,
            TransfoXLLMHeadModel,
            TransfoXLModel,
            TransfoXLPreTrainedModel,
            load_tf_weights_in_transfo_xl,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_transfo_xl import (
            TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFAdaptiveEmbedding,
            TFTransfoXLForSequenceClassification,
            TFTransfoXLLMHeadModel,
            TFTransfoXLMainLayer,
            TFTransfoXLModel,
            TFTransfoXLPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| code_codestyle: 330 |
import argparse

import fairseq
import torch

from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "encoder.layer_norm_for_extract": "layer_norm_for_extract",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "label_embs_concat": "label_embeddings_concat",
    "mask_emb": "masked_spec_embed",
    "spk_proj": "speaker_proj",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "label_embeddings_concat",
    "speaker_proj",
    "layer_norm_for_extract",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
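
# Example invocation (hypothetical paths; the flags mirror the argparse setup above):
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path /path/to/unispeech_sat.pt \
#       --pytorch_dump_folder_path ./unispeech-sat-hf \
#       --config_path ./config.json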
| 330
| 1
|
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("""fixtures""")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
SAMPLE_CONFIG = get_tests_dir("""fixtures/dummy-config.json""")
class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, WavaVecaFeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, WavaVecaFeatureExtractor)
    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = WavaVecaConfig()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()
            config_dict.pop("feature_extractor_type")
            config = WavaVecaFeatureExtractor(**config_dict)
            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)
            config = AutoFeatureExtractor.from_pretrained(tmpdirname)
            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)
            self.assertIsInstance(config, WavaVecaFeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, WavaVecaFeatureExtractor)
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")
    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")
    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(WavaVecaConfig, WavaVecaFeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(WavaVecaFeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)
            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)
            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 254
|
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
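
# Usage sketch (hypothetical repo and file path; the helper simply defers to huggingface_hub):
#   url = hf_hub_url("user/my-dataset", "data/train-00000-of-00001.parquet", revision="main")
#   # -> https://huggingface.co/datasets/user/my-dataset/resolve/main/data/train-00000-of-00001.parquet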
| 254
| 1
|
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    # Returns everything needed to perform basic training
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
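
# Note: gather_for_metrics (used above) also drops the duplicate samples that
# Accelerate adds to pad the last uneven batch across processes, so the
# concatenated logits/targs line up exactly with the dataset length.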
def test_torch_metrics(
    accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 240
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_focalnet"""] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
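
# With the lazy structure above, importing this module stays cheap: e.g.
# ``from transformers.models.focalnet import FocalNetModel`` only triggers the
# import of ``modeling_focalnet`` (and therefore torch) at attribute access time.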
| 309
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaPipeline
    params = [
        "image_embeds",
        "negative_image_embeds",
    ]
    batch_params = ["image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        config = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
        model = UNetaDConditionModel(**config)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
[0.623_7976, 1.0, 0.3644_1332, 1.0, 0.7063_9634, 0.2987_7186, 0.8565_2125, 0.521_6843, 0.5445_4046] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class KandinskyVaaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_text2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy"
        )
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyVaaPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        prompt = "red cat, 4k photo"
        generator = torch.Generator(device="cuda").manual_seed(0)
        image_embeds, negative_image_embeds = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt=""
        ).to_tuple()
        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_embeds,
            negative_image_embeds=negative_image_embeds,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
| 33
|
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
class MaskedBertConfig(PretrainedConfig):
    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
| 33
| 1
|
"""simple docstring"""
def join(separator: str, separated: list[str]) -> str:
    joined: str = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
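
# e.g. join(" ", ["You", "are", "amazing!"]) returns "You are amazing!"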
if __name__ == "__main__":
from doctest import testmod
testmod()
| 64
|
"""simple docstring"""
from math import factorial
def solution(n: int = 20) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
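
# solution(n) is the central binomial coefficient C(2n, n), i.e. the number of
# lattice paths through an n x n grid (Project Euler 15):
# solution(1) == 2 and solution(20) == 137846528820.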
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
__UpperCamelCase = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number.''')
| 69
| 0
|
from collections.abc import Callable
import numpy as np
def euler_modified(ode_func: Callable, ya: float, xa: float, step_size: float, x_end: float) -> np.array:
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size
    return y
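
# Usage sketch: this is Heun's method (modified Euler). Integrate y' = y on
# [0, 1] with y(0) = 1 (exact answer e ~ 2.71828):
#   ys = euler_modified(lambda x, y: y, 1.0, 0.0, 0.25, 1.0)
#   ys[-1]  # ~ 2.69 with this step size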
if __name__ == "__main__":
import doctest
doctest.testmod()
| 118
|
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''):
    _is_native_amp_available = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args, training_args):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWavaVecaPretraining:
    model: WavaVecaForPreTraining
    feature_extractor: WavaVecaFeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features):
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]

        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )
            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )
        return batch
class WavaVecaPreTrainer(Trainer):
    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model, inputs):
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()
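
# The schedule above is plain exponential decay clamped from below:
#   temp(step) = max(max_gumbel_temp * gumbel_temp_decay**step, min_gumbel_temp)
# e.g. with max_gumbel_temp=2.0 and decay=0.999995, after 100k updates
# temp ~ 2.0 * e**-0.5 ~ 1.21.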
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="validation",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'``"
        )

    model = WavaVecaForPreTraining(config)
    data_collator = DataCollatorForWavaVecaPretraining(model=model, feature_extractor=feature_extractor)
    trainer = WavaVecaPreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()
if __name__ == "__main__":
main()
| 118
| 1
|
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
| 191
|
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("""T""")
class DisjointSetTreeNode(Generic[T]):
    def __init__(self, data):
        self.data = data
        self.parent = self
        self.rank = 0
class DisjointSetTree(Generic[T]):
    def __init__(self):
        # map from node name to the node object
        self.map = {}

    def make_set(self, data):
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data):
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1, node2):
        # helper function for union operation
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1, data2):
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))
class GraphUndirectedWeighted(Generic[T]):
    def __init__(self):
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections = {}

    def add_node(self, node):
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1, node2, weight):
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
    def kruskal(self):
        # getting the edges in ascending order of weights
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
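
# Usage sketch (method names as restored from the call sites above):
#   g = GraphUndirectedWeighted[int]()
#   g.add_edge(1, 2, 1); g.add_edge(2, 3, 2); g.add_edge(1, 3, 10)
#   mst = g.kruskal()  # keeps the weight-1 and weight-2 edges, drops the weight-10 one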
| 330
| 0
|
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
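
# e.g. with size={"shortest_edge": 18}, get_expected_values above maps a 30x40
# (w x h) input to (expected_height, expected_width) == (24, 18): the short side
# is scaled to 18 and the aspect ratio is preserved.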
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__UpperCamelCase , __UpperCamelCase =self.image_processor_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCamelCase , __UpperCamelCase =self.image_processor_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ )
__UpperCamelCase =image_processing(UpperCamelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__UpperCamelCase , __UpperCamelCase =self.image_processor_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCamelCase =image_processing(UpperCamelCase__ , return_tensors='''pt''' ).pixel_values
__UpperCamelCase , __UpperCamelCase =self.image_processor_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__UpperCamelCase , __UpperCamelCase =self.image_processor_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCamelCase =image_processing(UpperCamelCase__ , return_tensors='''pt''' ).pixel_values
__UpperCamelCase , __UpperCamelCase =self.image_processor_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCAmelCase_ ( self : int ) -> str:
'''simple docstring'''
__UpperCamelCase =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
__UpperCamelCase =json.loads(f.read() )
__UpperCamelCase ={'''image_id''': 39769, '''annotations''': target}
# encode them
__UpperCamelCase =ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' )
__UpperCamelCase =image_processing(images=UpperCamelCase__ , annotations=UpperCamelCase__ , return_tensors='''pt''' )
# verify pixel values
__UpperCamelCase =torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase__ )
__UpperCamelCase =torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase__ , atol=1E-4 ) )
# verify area
__UpperCamelCase =torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase__ ) )
# verify boxes
__UpperCamelCase =torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase__ )
__UpperCamelCase =torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase__ , atol=1E-3 ) )
# verify image_id
__UpperCamelCase =torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase__ ) )
# verify is_crowd
__UpperCamelCase =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase__ ) )
# verify class_labels
__UpperCamelCase =torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase__ ) )
# verify orig_size
__UpperCamelCase =torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase__ ) )
# verify size
__UpperCamelCase =torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase__ ) )
@slow
def UpperCAmelCase_ ( self : Any ) -> Dict:
'''simple docstring'''
__UpperCamelCase =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
__UpperCamelCase =json.loads(f.read() )
__UpperCamelCase ={'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target}
__UpperCamelCase =pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
__UpperCamelCase =ConditionalDetrImageProcessor(format='''coco_panoptic''' )
__UpperCamelCase =image_processing(images=UpperCamelCase__ , annotations=UpperCamelCase__ , masks_path=UpperCamelCase__ , return_tensors='''pt''' )
# verify pixel values
__UpperCamelCase =torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase__ )
__UpperCamelCase =torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase__ , atol=1E-4 ) )
# verify area
__UpperCamelCase =torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase__ ) )
# verify boxes
__UpperCamelCase =torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase__ )
__UpperCamelCase =torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase__ , atol=1E-3 ) )
# verify image_id
__UpperCamelCase =torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase__ ) )
# verify is_crowd
__UpperCamelCase =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase__ ) )
# verify class_labels
__UpperCamelCase =torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase__ ) )
# verify masks
__UpperCamelCase =822873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCamelCase__ )
# verify orig_size
__UpperCamelCase =torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase__ ) )
# verify size
__UpperCamelCase =torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase__ ) )
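

# Added illustration: get_expected_values above encodes DETR-style resizing, i.e. scale
# the image so the shorter side reaches size["shortest_edge"] while keeping the aspect
# ratio (the real processor additionally caps the longer side at size["longest_edge"],
# which this sketch omits):
def expected_resized_shape(height, width, shortest_edge=800):
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge


# A 480x640 COCO image therefore resizes to 800x1066, matching the (1, 3, 800, 1066)
# pixel_values shape asserted in the slow tests above.
assert expected_resized_shape(480, 640) == (800, 1066)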
| 85
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"
def __init__( self : List[Any] , UpperCamelCase__ : List[Any]=30000 , UpperCamelCase__ : int=128 , UpperCamelCase__ : str=4096 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : Dict=1 , UpperCamelCase__ : Union[str, Any]=64 , UpperCamelCase__ : Any=16384 , UpperCamelCase__ : Any=1 , UpperCamelCase__ : Optional[int]="gelu_new" , UpperCamelCase__ : int=0 , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : Dict=512 , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : str=0.02 , UpperCamelCase__ : Tuple=1E-12 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Dict="absolute" , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : int=2 , UpperCamelCase__ : Optional[Any]=3 , **UpperCamelCase__ : List[str] , ) -> Dict:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
__UpperCamelCase =vocab_size
__UpperCamelCase =embedding_size
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_hidden_groups
__UpperCamelCase =num_attention_heads
__UpperCamelCase =inner_group_num
__UpperCamelCase =hidden_act
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =type_vocab_size
__UpperCamelCase =initializer_range
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =classifier_dropout_prob
__UpperCamelCase =position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
__UpperCamelCase ={0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__UpperCamelCase ={0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
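

# Added sketch of what the inputs property above yields for the default task; this
# mapping is what torch.onnx.export consumes as dynamic_axes, keeping batch and
# sequence dimensions flexible in the exported graph (standalone illustration, not
# the transformers API):
def albert_onnx_inputs(task="default"):
    if task == "multiple-choice":
        dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        dynamic_axis = {0: "batch", 1: "sequence"}
    return OrderedDict(
        [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
    )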
| 85
| 1
|
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
@require_torch
def __A ( self ) -> int:
'''simple docstring'''
__UpperCAmelCase : Dict = pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , )
__UpperCAmelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__UpperCAmelCase : Union[str, Any] = image_classifier(__UpperCAmelCase , candidate_labels=["""a""", """b""", """c"""] )
        # The floating-point scores are so close that we run into floating-point error,
        # so the ordering is not guaranteed across Python and torch versions.
self.assertIn(
nested_simplify(__UpperCAmelCase ) , [
[{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}],
[{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """c"""}, {"""score""": 0.333, """label""": """b"""}],
] , )
__UpperCAmelCase : List[str] = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
[
{"""score""": 0.333, """label""": ANY(__UpperCAmelCase )},
{"""score""": 0.333, """label""": ANY(__UpperCAmelCase )},
{"""score""": 0.333, """label""": ANY(__UpperCAmelCase )},
],
[
{"""score""": 0.333, """label""": ANY(__UpperCAmelCase )},
{"""score""": 0.333, """label""": ANY(__UpperCAmelCase )},
{"""score""": 0.333, """label""": ANY(__UpperCAmelCase )},
],
[
{"""score""": 0.333, """label""": ANY(__UpperCAmelCase )},
{"""score""": 0.333, """label""": ANY(__UpperCAmelCase )},
{"""score""": 0.333, """label""": ANY(__UpperCAmelCase )},
],
[
{"""score""": 0.333, """label""": ANY(__UpperCAmelCase )},
{"""score""": 0.333, """label""": ANY(__UpperCAmelCase )},
{"""score""": 0.333, """label""": ANY(__UpperCAmelCase )},
],
[
{"""score""": 0.333, """label""": ANY(__UpperCAmelCase )},
{"""score""": 0.333, """label""": ANY(__UpperCAmelCase )},
{"""score""": 0.333, """label""": ANY(__UpperCAmelCase )},
],
] , )
@require_tf
def __A ( self ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : str = pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , framework="""tf""" )
__UpperCAmelCase : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__UpperCAmelCase : List[Any] = image_classifier(__UpperCAmelCase , candidate_labels=["""a""", """b""", """c"""] )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}] , )
__UpperCAmelCase : List[str] = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
[
{"""score""": 0.333, """label""": ANY(__UpperCAmelCase )},
{"""score""": 0.333, """label""": ANY(__UpperCAmelCase )},
{"""score""": 0.333, """label""": ANY(__UpperCAmelCase )},
],
[
{"""score""": 0.333, """label""": ANY(__UpperCAmelCase )},
{"""score""": 0.333, """label""": ANY(__UpperCAmelCase )},
{"""score""": 0.333, """label""": ANY(__UpperCAmelCase )},
],
[
{"""score""": 0.333, """label""": ANY(__UpperCAmelCase )},
{"""score""": 0.333, """label""": ANY(__UpperCAmelCase )},
{"""score""": 0.333, """label""": ANY(__UpperCAmelCase )},
],
[
{"""score""": 0.333, """label""": ANY(__UpperCAmelCase )},
{"""score""": 0.333, """label""": ANY(__UpperCAmelCase )},
{"""score""": 0.333, """label""": ANY(__UpperCAmelCase )},
],
[
{"""score""": 0.333, """label""": ANY(__UpperCAmelCase )},
{"""score""": 0.333, """label""": ANY(__UpperCAmelCase )},
{"""score""": 0.333, """label""": ANY(__UpperCAmelCase )},
],
] , )
@slow
@require_torch
def __A ( self ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : str = pipeline(
task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , )
# This is an image of 2 cats with remotes and no planes
__UpperCAmelCase : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__UpperCAmelCase : Any = image_classifier(__UpperCAmelCase , candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
] , )
__UpperCAmelCase : Optional[int] = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
[
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
],
]
* 5 , )
@slow
@require_tf
def __A ( self ) -> int:
'''simple docstring'''
__UpperCAmelCase : Any = pipeline(
task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , framework="""tf""" )
# This is an image of 2 cats with remotes and no planes
__UpperCAmelCase : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__UpperCAmelCase : List[Any] = image_classifier(__UpperCAmelCase , candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
] , )
__UpperCAmelCase : List[str] = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
[
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
],
]
* 5 , )
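

# Added usage sketch of what these tests exercise (checkpoint name taken from the slow
# tests above; requires network access and a vision backend):
def zero_shot_demo():
    from transformers import pipeline

    classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
    predictions = classifier(
        "./tests/fixtures/tests_samples/COCO/000000039769.png",
        candidate_labels=["cat", "plane", "remote"],
    )
    print(predictions)  # e.g. [{'score': 0.511, 'label': 'remote'}, ...]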
| 254
|
'''simple docstring'''
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
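

# Added illustration: sanity checks from the Project Euler 114 statement. A row of
# length 7 admits exactly 17 fillings with blocks of length >= 3 separated by gaps,
# and a row of length 3 admits only the empty filling or a single 3-block.
assert solution(7) == 17
assert solution(3) == 2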
if __name__ == "__main__":
print(F'{solution() = }')
| 254
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4096, activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
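

# Added usage sketch for the config class above (a minimal illustration; assumes the
# class is exposed as transformers.TrOCRConfig, which is where this decoder config
# normally lives):
def _trocr_config_demo():
    config = TrOCRConfig(d_model=256, decoder_layers=6, decoder_attention_heads=8)
    # attribute_map resolves the generic names to the decoder-specific fields:
    assert config.hidden_size == config.d_model
    assert config.num_hidden_layers == config.decoder_layers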
| 355
|
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print('Generating primitive root of p')
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple:
    print('Generating prime p...')
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)
    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print('\nWARNING:')
        print(
            f"\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"
            'Use a different name or delete these files and re-run this program.' )
        sys.exit()
    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", 'w') as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", 'w') as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print('Making key files...')
    make_key_files('elgamal', 2048)
    print('Key files generation successful')
if __name__ == "__main__":
main()
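

# Added illustration of the key shapes above: a toy ElGamal round trip with tiny,
# insecure parameters (never use sizes like this in practice).
def elgamal_demo() -> None:
    p, g, x = 467, 2, 127  # public prime, generator, private key
    h = pow(g, x, p)  # public component g^x mod p
    m, k = 123, 58  # message and ephemeral key
    c1, c2 = pow(g, k, p), (m * pow(h, k, p)) % p  # ciphertext pair
    # decrypt: m = c2 * c1^(p - 1 - x) mod p, using Fermat's little theorem to invert c1^x
    recovered = (c2 * pow(c1, p - 1 - x, p)) % p
    assert recovered == m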
| 280
| 0
|
"""simple docstring"""
def nand_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
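

# NAND is functionally complete; as an added illustration, here is XOR built only from
# the gate above (a sketch, not part of the original module):
def xor_from_nand(input_1: int, input_2: int) -> int:
    carry = nand_gate(input_1, input_2)
    return nand_gate(nand_gate(input_1, carry), nand_gate(input_2, carry))


assert [xor_from_nand(a, b) for a, b in ((0, 0), (0, 1), (1, 0), (1, 1))] == [0, 1, 1, 0]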
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 33
|
"""simple docstring"""
def sum_of_divisors(input_num: int) -> int:
    if not isinstance(input_num, int):
        raise ValueError('''Input must be an integer''')
    if input_num <= 0:
        raise ValueError('''Input must be positive''')
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0)
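

# Added illustration: one classic use of the proper-divisor sum above is testing for
# perfect numbers (numbers equal to the sum of their proper divisors).
def is_perfect(number: int) -> bool:
    return sum_of_divisors(number) == number


assert is_perfect(6) and is_perfect(28) and not is_perfect(12)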
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase__ = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
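

# Added illustration: the _LazyModule above defers heavy imports until an attribute is
# first accessed. The same effect can be sketched with PEP 562 module-level __getattr__
# (a simplified stand-in, not the transformers implementation; it only works when the
# module lives inside a package so the relative import resolves):
#
#     import importlib
#
#     _LAZY_ATTRS = {"ReformerConfig": ".configuration_reformer"}
#
#     def __getattr__(name):
#         if name in _LAZY_ATTRS:
#             module = importlib.import_module(_LAZY_ATTRS[name], __name__)
#             return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")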
| 369
|
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file):
    # Load a vocabulary file into an ordered token -> index mapping.
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end
        return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(self, vocab_file, bod_token="<d>", eod_token="</d>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", unk_token="<unk>", line_token="</n>", space_token="</_>", padding_side="left", **kwargs):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token, eod_token=eod_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, unk_token=unk_token, line_token=line_token, space_token=space_token, padding_side=padding_side, **kwargs, )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
@property
def _lowerCamelCase ( self ):
return self.encoder[self.bod_token]
@property
def _lowerCamelCase ( self ):
return self.encoder[self.eod_token]
@property
def _lowerCamelCase ( self ):
return self.encoder["\n"]
@property
def _lowerCamelCase ( self ):
return len(self.encoder )
def _lowerCamelCase ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def _lowerCamelCase ( self , __lowerCAmelCase ):
UpperCamelCase__ = []
for x in jieba.cut(__lowerCAmelCase , cut_all=__lowerCAmelCase ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(__lowerCAmelCase ) )
return output_tokens
def _lowerCamelCase ( self , __lowerCAmelCase , **__lowerCAmelCase ):
UpperCamelCase__ = [i for i in token_ids if i >= 0]
UpperCamelCase__ = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(__lowerCAmelCase , **__lowerCAmelCase )
def _lowerCamelCase ( self , __lowerCAmelCase ):
return token in self.encoder
def _lowerCamelCase ( self , __lowerCAmelCase ):
return "".join(__lowerCAmelCase )
def _lowerCamelCase ( self , __lowerCAmelCase ):
return self.encoder.get(__lowerCAmelCase , self.encoder.get(self.unk_token ) )
def _lowerCamelCase ( self , __lowerCAmelCase ):
return self.decoder.get(__lowerCAmelCase , self.unk_token )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
if os.path.isdir(__lowerCAmelCase ):
UpperCamelCase__ = os.path.join(
__lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
else:
UpperCamelCase__ = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
UpperCamelCase__ = 0
if " " in self.encoder:
UpperCamelCase__ = self.encoder[""" """]
del self.encoder[" "]
if "\n" in self.encoder:
UpperCamelCase__ = self.encoder["""\n"""]
del self.encoder["\n"]
UpperCamelCase__ = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __lowerCAmelCase : x[1] ) )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
""" Please check that the vocabulary is not corrupted!""" )
UpperCamelCase__ = token_index
writer.write(token + """\n""" )
index += 1
return (vocab_file,)
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase )
if token_ids_a is not None:
return [1] + ([0] * len(__lowerCAmelCase )) + [1] + ([0] * len(__lowerCAmelCase ))
return [1] + ([0] * len(__lowerCAmelCase ))
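

# Added illustration: the WordpieceTokenizer above does greedy longest-match-first
# segmentation against the vocabulary, emitting the unknown token and advancing one
# character whenever no prefix matches. A toy standalone version of that loop:
def greedy_longest_match(word, vocab, unk="<unk>"):
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start and word[start:end] not in vocab:
            end -= 1
        if end == start:  # no prefix matched
            tokens.append(unk)
            start += 1
        else:
            tokens.append(word[start:end])
            start = end
    return tokens


assert greedy_longest_match("unhappy", {"un", "happy", "hap"}) == ["un", "happy"]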
| 87
| 0
|
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
"""simple docstring"""
def __init__( self : List[str] , __magic_name__ : Optional[int] , __magic_name__ : int = 13 , __magic_name__ : int = 64 , __magic_name__ : int = 2 , __magic_name__ : int = 3 , __magic_name__ : int = 3 , __magic_name__ : bool = True , __magic_name__ : bool = True , __magic_name__ : int = 128 , __magic_name__ : str=[16, 32, 64, 128] , __magic_name__ : int = 7 , __magic_name__ : int = 4 , __magic_name__ : int = 37 , __magic_name__ : str = "gelu" , __magic_name__ : float = 0.1 , __magic_name__ : float = 0.1 , __magic_name__ : int = 10 , __magic_name__ : float = 0.02 , __magic_name__ : int = 2 , __magic_name__ : int = 1 , __magic_name__ : int = 128 , __magic_name__ : List[int] = [2, 2, 2, 2] , __magic_name__ : int = 2 , __magic_name__ : int = 2 , ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = patch_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = type_sequence_label_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = encoder_stride
SCREAMING_SNAKE_CASE_ = num_attention_outputs
SCREAMING_SNAKE_CASE_ = embed_dim
SCREAMING_SNAKE_CASE_ = embed_dim + 1
SCREAMING_SNAKE_CASE_ = resolution
SCREAMING_SNAKE_CASE_ = depths
SCREAMING_SNAKE_CASE_ = hidden_sizes
SCREAMING_SNAKE_CASE_ = dim
SCREAMING_SNAKE_CASE_ = mlp_expansion_ratio
def __A ( self : int ) -> Dict:
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ = self.get_config()
return config, pixel_values, labels
def __A ( self : List[Any] ) -> Tuple:
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__magic_name__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def __A ( self : Dict , __magic_name__ : int , __magic_name__ : List[str] , __magic_name__ : Tuple ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = TFEfficientFormerModel(config=__magic_name__ )
SCREAMING_SNAKE_CASE_ = model(__magic_name__ , training=__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : List[str] , __magic_name__ : Any ) -> List[str]:
SCREAMING_SNAKE_CASE_ = self.type_sequence_label_size
SCREAMING_SNAKE_CASE_ = TFEfficientFormerForImageClassification(__magic_name__ )
SCREAMING_SNAKE_CASE_ = model(__magic_name__ , labels=__magic_name__ , training=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = TFEfficientFormerForImageClassification(__magic_name__ )
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __A ( self : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs
SCREAMING_SNAKE_CASE_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
lowerCamelCase__ = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
lowerCamelCase__ = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37)
def __A ( self : int ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason="EfficientFormer does not use inputs_embeds" )
def __A ( self : Dict ) -> Dict:
pass
@unittest.skip(reason="EfficientFormer does not support input and output embeddings" )
def __A ( self : Union[str, Any] ) -> Union[str, Any]:
pass
def __A ( self : Optional[int] ) -> int:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
SCREAMING_SNAKE_CASE_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def __A ( self : List[str] ) -> Dict:
def check_hidden_states_output(__magic_name__ : Dict , __magic_name__ : int , __magic_name__ : List[str] ):
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) , training=__magic_name__ )
SCREAMING_SNAKE_CASE_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE_ = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__magic_name__ ) , __magic_name__ )
if hasattr(self.model_tester , "encoder_seq_length" ):
SCREAMING_SNAKE_CASE_ = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , "chunk_length" ) and self.model_tester.chunk_length > 1:
SCREAMING_SNAKE_CASE_ = seq_length * self.model_tester.chunk_length
else:
SCREAMING_SNAKE_CASE_ = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
SCREAMING_SNAKE_CASE_ = outputs.decoder_hidden_states
                self.assertIsInstance(__magic_name__ , (list, tuple) )
self.assertEqual(len(__magic_name__ ) , __magic_name__ )
SCREAMING_SNAKE_CASE_ = getattr(self.model_tester , "seq_length" , __magic_name__ )
SCREAMING_SNAKE_CASE_ = getattr(self.model_tester , "decoder_seq_length" , __magic_name__ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_ = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
def __A ( self : Optional[int] , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : int=False ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = super()._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __A ( self : Tuple ) -> Any:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
@unittest.skip(reason="EfficientFormer does not implement masked image modeling yet" )
def __A ( self : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__magic_name__ )
def __A ( self : List[str] ) -> Any:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__magic_name__ )
@slow
def __A ( self : Tuple ) -> Optional[Any]:
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ = TFEfficientFormerModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def __A ( self : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = getattr(self.model_tester , "seq_length" , __magic_name__ )
SCREAMING_SNAKE_CASE_ = getattr(self.model_tester , "encoder_seq_length" , __magic_name__ )
SCREAMING_SNAKE_CASE_ = getattr(self.model_tester , "key_length" , __magic_name__ )
SCREAMING_SNAKE_CASE_ = getattr(self.model_tester , "chunk_length" , __magic_name__ )
if chunk_length is not None and hasattr(self.model_tester , "num_hashes" ):
SCREAMING_SNAKE_CASE_ = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) , training=__magic_name__ )
SCREAMING_SNAKE_CASE_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) , training=__magic_name__ )
SCREAMING_SNAKE_CASE_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def __A ( self : Union[str, Any] ) -> List[str]:
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
SCREAMING_SNAKE_CASE_ = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=__magic_name__ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
SCREAMING_SNAKE_CASE_ = model(__magic_name__ )
self.assertTrue(outputs_dict is not None )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
def __A ( self : Optional[int] ) -> int:
return (
EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300" )
if is_vision_available()
else None
)
@slow
def __A ( self : Optional[Any] ) -> Dict:
SCREAMING_SNAKE_CASE_ = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300" )
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="tf" )
# forward pass
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ , training=__magic_name__ )
# verify the logits
SCREAMING_SNAKE_CASE_ = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = tf.constant([-0.0555, 0.4825, -0.0852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1e-4 ) )
@slow
def __A ( self : int ) -> List[str]:
SCREAMING_SNAKE_CASE_ = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"snap-research/efficientformer-l1-300" )
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=__magic_name__ , return_tensors="tf" )
# forward pass
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ , training=__magic_name__ )
# verify the logits
SCREAMING_SNAKE_CASE_ = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
SCREAMING_SNAKE_CASE_ = tf.constant([-0.1312, 0.4353, -1.0499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1e-4 ) )
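

# Added end-to-end sketch of what the slow tests above exercise (requires network
# access and TensorFlow; checkpoint and fixture path are taken from the tests):
def _inference_demo():
    processor = EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
    model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
    inputs = processor(images=prepare_img(), return_tensors="tf")
    logits = model(**inputs, training=False).logits  # shape (1, 1000) ImageNet logits
    predicted_class = int(tf.argmax(logits, axis=-1)[0])
    print(model.config.id2label[predicted_class])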
| 118
|
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
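

# Added illustration: the index search above recomputes the whole Fibonacci list for
# every candidate index, which is quadratic overall. An incremental sketch that keeps
# only the last two terms gives the same answer in linear time:
def fibonacci_digits_index_fast(n: int) -> int:
    a, b, index = 1, 1, 2
    while len(str(b)) < n:
        a, b = b, a + b
        index += 1
    return index


assert fibonacci_digits_index_fast(3) == 12  # F(12) = 144 is the first 3-digit term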
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 118
| 1
|
from random import randint, random
def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway
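

# Added usage sketch for the Nagel-Schreckenberg model above (parameter values are
# arbitrary; seeded so the run is reproducible):
def demo() -> None:
    from random import seed

    seed(0)
    road = construct_highway(number_of_cells=30, frequency=4, initial_speed=1)
    history = simulate(road, number_of_update=5, probability=0.3, max_speed=5)
    for step in history:
        print("".join("." if cell == -1 else str(cell) for cell in step))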
if __name__ == "__main__":
import doctest
doctest.testmod()
| 360
|
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
def __init__( self , _A , _A=1_4 , _A=7 , _A=True , _A=True , _A=False , _A=True , _A=9_9 , _A=3_2 , _A=4 , _A=4 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=5_1_2 , _A=0.02 , ):
"""simple docstring"""
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_input_mask
__lowerCAmelCase = use_token_type_ids
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = rotary_dim
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = initializer_range
__lowerCAmelCase = None
__lowerCAmelCase = vocab_size - 1
__lowerCAmelCase = vocab_size - 1
__lowerCAmelCase = vocab_size - 1
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase = None
if self.use_input_mask:
__lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=_A , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = 2_0
__lowerCAmelCase = model_class_name(_A )
__lowerCAmelCase = model.init_cache(input_ids.shape[0] , _A )
__lowerCAmelCase = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="i4" )
__lowerCAmelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCAmelCase = model(
input_ids[:, :-1] , attention_mask=_A , past_key_values=_A , position_ids=_A , )
__lowerCAmelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" )
__lowerCAmelCase = model(
input_ids[:, -1:] , attention_mask=_A , past_key_values=outputs_cache.past_key_values , position_ids=_A , )
__lowerCAmelCase = model(_A )
__lowerCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = 2_0
__lowerCAmelCase = model_class_name(_A )
__lowerCAmelCase = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
__lowerCAmelCase = model.init_cache(input_ids.shape[0] , _A )
__lowerCAmelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCAmelCase = model(
input_ids[:, :-1] , attention_mask=_A , past_key_values=_A , position_ids=_A , )
__lowerCAmelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" )
__lowerCAmelCase = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=_A , position_ids=_A , )
__lowerCAmelCase = model(_A , attention_mask=_A )
__lowerCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )
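
    # Added sketch of the decode-with-cache pattern the two checks above exercise
    # (hypothetical names; it mirrors the flow: prime the cache on the prompt, then
    # feed one token at a time with explicit position_ids):
    #
    #     past = model.init_cache(batch_size, max_decoder_length)
    #     out = model(input_ids[:, :-1], attention_mask=mask,
    #                 past_key_values=past, position_ids=positions)
    #     out_next = model(input_ids[:, -1:], attention_mask=mask,
    #                      past_key_values=out.past_key_values, position_ids=last_position)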
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(_A , _A , _A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
_A , _A , _A , _A )
@tooslow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = GPTaTokenizer.from_pretrained("gpt2" , pad_token="<|endoftext|>" , padding_side="left" )
__lowerCAmelCase = tokenizer(["Hello this is a long string", "Hey"] , return_tensors="np" , padding=_A , truncation=_A )
__lowerCAmelCase = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B" )
__lowerCAmelCase = False
__lowerCAmelCase = model.config.eos_token_id
__lowerCAmelCase = jax.jit(model.generate )
__lowerCAmelCase = jit_generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , pad_token_id=tokenizer.pad_token_id ).sequences
__lowerCAmelCase = tokenizer.batch_decode(_A , skip_special_tokens=_A )
__lowerCAmelCase = [
"Hello this is a long string of text.\n\nI'm trying to get the text of the",
"Hey, I'm a little late to the party. I'm going to",
]
self.assertListEqual(_A , _A )
    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                    fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                    self.assertEqual(
                        len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                    )
                    for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                        self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                    with torch.no_grad():
                        pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                    self.assertEqual(
                        len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                    )
                    for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                        self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 102
| 0
|
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
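    # Editor's note (added): a StoppingCriteriaList fires as soon as any one criterion
    # returns True, so StoppingCriteriaList([MaxLengthCriteria(10), MaxTimeCriteria(0.1)])
    # stops at 10 tokens or after 0.1 s, whichever comes first -- exactly what
    # test_list_criteria above exercises.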
| 85
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_conditional_detr"] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
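# Editor's note (added): with the `_LazyModule` pattern above, importing this package is
# cheap at interpreter start-up; the vision- and torch-dependent submodules listed in
# `_import_structure` are only imported when one of their symbols (e.g.
# `ConditionalDetrModel`) is first accessed.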
| 85
| 1
|
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        n_identifier: Union[List[str], None] = None,
        ignore_files: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """
        Runs through the specific directory, checking all files' doc examples.
        """
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_dir = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_dir, ignore_files=ignore_files, only_modules=False)
| 260
|
def is_even(number: int) -> bool:
    """
    >>> is_even(4)
    True
    >>> is_even(7)
    False
    """
    return number & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 260
| 1
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16,
        image_size=224, patch_size=14, hidden_act="gelu", layer_norm_eps=0.00001,
        attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02,
        layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute",
        cross_attention_frequency=2, encoder_hidden_size=1408, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: Blip2VisionConfig,
        qformer_config: Blip2QFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
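# Illustrative usage (editor's sketch, not part of the original module). The default
# text backbone is OPT, so `OPTConfig` is assumed importable from `transformers`:
if __name__ == "__main__":
    from transformers import OPTConfig

    demo_config = Blip2Config.from_vision_qformer_text_configs(
        Blip2VisionConfig(), Blip2QFormerConfig(), OPTConfig()
    )
    print(demo_config.num_query_tokens)  # 32 by default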
| 20
|
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedily build `value` out of the given coin `denominations`, largest first."""
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denomination
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array

    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
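    # Editor's note (added): greedy change-making is optimal for canonical coin systems
    # such as the INR denominations above, e.g. find_minimum_change([1, 2, 5, 10], "27")
    # returns [10, 10, 5, 2]; for non-canonical systems it can be suboptimal, e.g. with
    # denominations [1, 3, 4] and value 6 it picks [4, 1, 1] instead of [3, 3].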
| 280
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}
class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224, patch_size=4, num_channels=3, embed_dim=96,
        depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0,
        qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False,
        initializer_range=0.02, layer_norm_eps=1e-5, encoder_stride=32, **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
| 351
|
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    download_desc: Optional[str] = None
    storage_options: Optional[Dict] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
| 313
| 0
|
"""simple docstring"""
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves the multi-process interleaved print problem"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"

try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")

    dist.barrier()
    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")

except Exception:
    printflock(f"{gpu} is broken")
    raise
| 61
|
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__(
        self,
        notes_encoder: SpectrogramNotesEncoder,
        continuous_encoder: SpectrogramContEncoder,
        decoder: TaFilmDecoder,
        scheduler: DDPMScheduler,
        melgan: OnnxRuntimeModel if is_onnx_available() else Any,
    ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )

    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale features from the MelGAN range to `output_range`."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert `scale_features`: map network outputs back to the MelGAN range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )

        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )

        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits
@torch.no_grad()
    def __call__(
        self,
        input_tokens: List[List[int]],
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 100,
        return_dict: bool = True,
        output_type: str = "numpy",
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info("Generated segment", i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'."
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'."
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
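    # Editor's usage sketch (illustrative; requires trained weights, and the hub id below
    # is an assumption about where this pipeline's checkpoint is published):
    #
    #     pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
    #     audio = pipe(encoded_midi_token_chunks, num_inference_steps=100).audios[0]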
| 87
| 0
|
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    """
    Malus's law: the intensity transmitted through an ideal polarizer is
    I = I_0 * cos^2(theta).

    >>> malus_law(100.0, 0)
    100.0
    >>> malus_law(-10, 45)
    Traceback (most recent call last):
        ...
    ValueError: The value of intensity cannot be negative
    """
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
        # handling of negative values of initial intensity
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
        # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)


if __name__ == "__main__":
    import doctest

    doctest.testmod(name="malus_law")
| 82
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
        sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
        tokenize_chinese_chars=True, strip_accents=None, **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case,
            unk_token=unk_token, sep_token=sep_token, pad_token=pad_token,
            cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 82
| 1
|
import datasets
from .evaluate import evaluate
_CITATION = '''\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
'''
_DESCRIPTION = '''
This metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).

Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
'''

_KWARGS_DESCRIPTION = '''
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the SQuAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]
>>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]
>>> squad_metric = datasets.load_metric("squad")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Squad(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")},
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
            reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
        )

    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 8
|
"""simple docstring"""
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    """Return True if `n` reads the same forwards and backwards."""
    n = str(n)
    return n == n[::-1]


def solution(limit: int = 1_000_000) -> int:
    """Sum the numbers below `limit` that are palindromic in base 10 and in base 2."""
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
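# Editor's check (added): solution(10) == 25, since 1, 3, 5, 7 and 9 are the only numbers
# below 10 that are palindromic in both base 10 and base 2 (1, 11, 101, 111, 1001).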
| 102
| 0
|
"""simple docstring"""
import itertools
import math
def is_prime(number: int) -> bool:
    """Return True if `number` is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the primes in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """Return the `nth` prime number (Project Euler problem 7)."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
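# Editor's check (added): solution(6) == 13 (the sixth prime), and the default
# solution() == 104743, the well-known answer to Project Euler problem 7.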
| 161
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257, max_position_embeddings=2048, hidden_size=2048, num_layers=24,
        attention_types=[[["global", "local"], 12]], num_heads=16, intermediate_size=None,
        window_size=256, activation_function="gelu_new", resid_dropout=0.0, embed_dropout=0.0,
        attention_dropout=0.0, classifier_dropout=0.1, layer_norm_epsilon=1e-5,
        initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
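    # Editor's example (added): the default attention_types [[["global", "local"], 12]]
    # expand to 24 alternating layer types:
    #     GPTNeoConfig.expand_attention_types_params([[["global", "local"], 12]])
    #     -> ["global", "local", "global", "local", ...]  # length 24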
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom implementation of the block-split helper to enable the export to ONNX."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 161
| 1
|
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    """Seed python, numpy and torch RNGs for (best-effort) reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
class EMAModel:
    """
    Exponential Moving Average of models weights
    """

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config
    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)

        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)

        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")

        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")

        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)

        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)

    def get_decay(self, optimization_step: int) -> float:
        """Compute the decay factor for the exponential moving average."""
        step = max(0, optimization_step - self.update_after_step - 1)

        if step <= 0:
            return 0.0

        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)

        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value
    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)

    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        # .to() on the tensors handles None correctly
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]
    def state_dict(self) -> dict:
        """Returns the state of the ExponentialMovingAverage as a dict."""
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }
    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)

        # Better memory-wise.
        self.temp_stored_params = None

    def load_state_dict(self, state_dict: dict) -> None:
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
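# Editor's usage sketch (illustrative, not part of the original module):
#
#     ema = EMAModel(model.parameters(), decay=0.9999)
#     for batch in dataloader:
#         train_step(model, batch)        # forward/backward + optimizer.step()
#         ema.step(model.parameters())    # update the shadow (EMA) weights
#     ema.copy_to(model.parameters())     # swap in EMA weights for eval/saving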
| 260
|
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """
        Small helper to check pseudo-equality of parsers: ArgumentParser
        instances cannot be compared directly, so compare their actions.
        """
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)

    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)

    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))

    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)

    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))

    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)

            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))

    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)

    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)

    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            parsed_args = parser.parse_json_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]

        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
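# Hedged usage sketch (added for illustration, not part of the original test
# module): HfArgumentParser maps dataclass fields to argparse arguments.
if __name__ == "__main__":
    demo_parser = HfArgumentParser(BasicExample)
    (demo_args,) = demo_parser.parse_args_into_dataclasses(
        ["--foo", "3", "--bar", "1.5", "--baz", "hello"], look_for_args_file=False
    )
    print(demo_args)  # BasicExample(foo=3, bar=1.5, baz='hello', flag=False)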
| 260
| 1
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    """Builds a small random LayoutLM config plus matching dummy inputs."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass
def prepare_layoutlm_batch_inputs():
    # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]])  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],])  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]])  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]])  # noqa: E231
    # fmt: on
    return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")

        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))

        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)

        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )

        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)

        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        # initialize model with randomly initialized question answering head
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")

        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
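# Hedged helper sketch (added for illustration, not part of the original
# tests): LayoutLM expects bounding boxes in a normalized 0-1000 coordinate
# space, which is why the model tester above samples bbox values with
# range_bbox=1000. A typical preprocessing helper looks like:
def normalize_box(box, width, height):
    # box = (x0, y0, x1, y1) in absolute pixel coordinates of the page image
    return [
        int(1000 * box[0] / width),
        int(1000 * box[1] / height),
        int(1000 * box[2] / width),
        int(1000 * box[3] / height),
    ]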
| 141
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
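# Hedged aside (added for illustration, not part of the original tests): for
# img2img pipelines, `strength` controls how far into the noise schedule
# denoising starts, so only roughly int(num_inference_steps * strength) steps
# actually run. The fast test above (strength=0.2, num_inference_steps=10)
# therefore executes only a couple of denoising steps.
def effective_img2img_steps(num_inference_steps: int, strength: float) -> int:
    return min(int(num_inference_steps * strength), num_inference_steps)


assert effective_img2img_steps(10, 0.2) == 2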
| 141
| 1
|
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__lowerCamelCase = """true"""
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(
    accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
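# Hedged aside (added for illustration, not part of the original script): the
# reason gather_for_metrics exists is that a distributed sampler pads the
# dataset so every process sees the same number of batches, and a plain
# gather would over-count those padded duplicates. Conceptually it does:
#
#   gathered = gather(tensor_from_each_process)
#   gathered = gathered[:len(dataset)]   # drop duplicated tail samples
#
# which is why test_torch_metrics can assert an exact sample count even for
# lengths (like 99) that do not divide evenly across processes.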
| 59
|
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """
    Multiplication only for 2x2 matrices.
    """
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    """
    Split a matrix into its top-left, top-right, bottom-left and bottom-right quadrants.
    """
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]

    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """
    Recursively compute the product of two square power-of-two matrices with
    Strassen's seven-multiplication scheme.
    """
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(top_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix1 = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
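    # Quick sanity check (hedged illustration, not part of the original file):
    # cross-check Strassen against a plain triple-loop product on fresh inputs
    # (fresh because strassen() pads its arguments in place).
    check_a = [[2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7]]
    check_b = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    naive = [
        [sum(check_a[i][k] * check_b[k][j] for k in range(4)) for j in range(4)]
        for i in range(3)
    ]
    assert strassen(check_a, check_b) == naive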
| 313
| 0
|
"""simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
) -> None:
    """Register a Formatter class under a format type name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
) -> None:
    """Register the error to raise when a format type's backend is not installed."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
lowercase_ = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
_register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
lowercase_ = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
_register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, "jax", aliases=[])
else:
lowercase_ = ValueError("JAX needs to be installed to be able to return JAX arrays.")
_register_unavailable_formatter(_jax_error, "jax", aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, return its main type name; otherwise return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Instantiate the Formatter registered for the given format type (or alias)."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
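# Hedged usage sketch (added for illustration, not part of the original
# module): aliases resolve to their registered main type before lookup, so
# "np" and "numpy" select the same formatter.
if __name__ == "__main__":
    assert get_format_type_from_alias("np") == "numpy"
    print(type(get_formatter("numpy")).__name__)  # -> NumpyFormatter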
| 11
|
"""simple docstring"""
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
    for col in range(n):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )
def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
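    # Hedged worked example (added for illustration): a queen at row=1, col=3
    # sits on the diagonals identified by row - col = -2 and row + col = 4;
    # a queen at row=2, col=4 collides on the first identity (2 - 4 == -2),
    # which is exactly what the membership tests in depth_first_search reject.
    assert 1 - 3 == 2 - 4
    assert 1 + 3 != 2 + 4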
| 11
| 1
|
A__ = [0, 2, 4, 6, 8]
A__ = [1, 3, 5, 7, 9]
def reversible_numbers(
    remaining_length: int, remainder: int, digits: list[int], length: int
) -> int:
    """
    Count the reversible numbers of the given length, iterating over possible
    digits while tracking the parity of the running sum remainder.
    """
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0

            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    """
    Sum the counts of reversible numbers over all digit lengths up to max_power.
    """
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
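# Hedged cross-check (added for illustration): Project Euler 145 states there
# are exactly 120 reversible numbers below one thousand; a direct brute force
# over that range should agree with solution(3).
def _brute_force_check(limit: int = 1000) -> int:
    count = 0
    for n in range(1, limit):
        if n % 10 != 0 and all(int(d) % 2 == 1 for d in str(n + int(str(n)[::-1]))):
            count += 1
    return count


assert _brute_force_check(1000) == solution(3) == 120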
if __name__ == "__main__":
print(f"{solution() = }")
| 82
|
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    """
    Return True if `n` reads the same forwards and backwards.
    """
    n = str(n)
    return n == n[::-1]


def solution(n: int = 1_000_000) -> int:
    """
    Return the sum of all numbers below `n` that are palindromic in both
    base 10 and base 2.
    """
    total = 0
    for i in range(1, n):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
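# Hedged worked example (added for illustration): 585 = 0b1001001001 is
# palindromic in both base 10 and base 2, so it contributes to the total.
assert is_palindrome(585) and is_palindrome(bin(585).split("b")[1])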
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 82
| 1
|
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
_lowerCamelCase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    """
    Automatic mask generation for images: prompts a SAM-style model with a grid
    of points and aggregates the predicted masks.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
def lowerCamelCase_ ( self : Any , __snake_case : Optional[int] , __snake_case : Tuple=False , __snake_case : Optional[int]=False , __snake_case : List[str]=0.7 , ):
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for model_output in model_outputs:
all_scores.append(model_output.pop('''iou_scores''' ) )
all_masks.extend(model_output.pop('''masks''' ) )
all_boxes.append(model_output.pop('''boxes''' ) )
UpperCAmelCase_ = torch.cat(__snake_case )
UpperCAmelCase_ = torch.cat(__snake_case )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor.post_process_for_mask_generation(
__snake_case , __snake_case , __snake_case , __snake_case )
UpperCAmelCase_ = defaultdict(__snake_case )
for output in model_outputs:
for k, v in output.items():
extra[k].append(__snake_case )
UpperCAmelCase_ = {}
if output_rle_mask:
UpperCAmelCase_ = rle_mask
if output_bboxes_mask:
UpperCAmelCase_ = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 177
|
import numpy as np
def SCREAMING_SNAKE_CASE ( vector : np.array ) -> np.array:
    # hyperbolic tangent expressed via the logistic sigmoid: tanh(x) = 2 / (1 + e^(-2x)) - 1
    return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
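    # Sanity check (sample values added for illustration): the identity
    # 2*sigmoid(2x) - 1 == tanh(x) should hold elementwise.
    sample = np.array([-1.0, 0.0, 1.0])
    assert np.allclose(SCREAMING_SNAKE_CASE(sample), np.tanh(sample))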
| 177
| 1
|
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase):
# TODO: is there an appropriate internal test set?
    hub_checkpoint: Optional[Any] = 'ssube/stable-diffusion-x4-upscaler-onnx'
    def get_dummy_inputs( self :Optional[Any] , seed :Any=0 ) -> Optional[Any]:
        '''simple docstring'''
        image = floats_tensor((1, 3, 128, 128) , rng=random.Random(seed ) )
        generator = torch.manual_seed(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs
def lowercase_ ( self :Dict ) -> Dict:
'''simple docstring'''
__A = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=_A )
__A = self.get_dummy_inputs()
__A = pipe(**_A ).images
__A = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
__A = np.array(
[0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def lowercase_ ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
__A = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__A = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_A )
pipe.set_progress_bar_config(disable=_A )
__A = self.get_dummy_inputs()
__A = pipe(**_A ).images
__A = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__A = np.array(
[0.6_898_892, 0.59_240_556, 0.52_499_527, 0.58_866_215, 0.52_258_235, 0.52_572_715, 0.62_414_473, 0.6_174_387, 0.6_214_964] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowercase_ ( self :Dict ) -> Dict:
'''simple docstring'''
__A = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_A )
__A = self.get_dummy_inputs()
__A = pipe(**_A ).images
__A = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__A = np.array(
[0.7_659_278, 0.76_437_664, 0.75_579_107, 0.7_691_116, 0.77_666_986, 0.7_727_672, 0.7_758_664, 0.7_812_226, 0.76_942_515] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowercase_ ( self :List[str] ) -> List[str]:
'''simple docstring'''
__A = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__A = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_A )
__A = self.get_dummy_inputs()
__A = pipe(**_A ).images
__A = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__A = np.array(
[0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def lowercase_ ( self :Tuple ) -> int:
'''simple docstring'''
__A = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
__A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_A )
__A = self.get_dummy_inputs()
__A = pipe(**_A ).images
__A = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__A = np.array(
[0.77_424_496, 0.773_601, 0.7_645_288, 0.7_769_598, 0.7_772_739, 0.7_738_688, 0.78_187_233, 0.77_879_584, 0.767_043] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase):
@property
def lowercase_ ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowercase_ ( self :List[Any] ) -> int:
'''simple docstring'''
__A = ort.SessionOptions()
__A = False
return options
def lowercase_ ( self :List[str] ) -> Optional[Any]:
'''simple docstring'''
__A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
__A = init_image.resize((128, 128) )
# using the PNDM scheduler by default
__A = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_A )
__A = 'A fantasy landscape, trending on artstation'
__A = torch.manual_seed(0 )
__A = pipe(
prompt=_A , image=_A , guidance_scale=7.5 , num_inference_steps=10 , generator=_A , output_type='np' , )
__A = output.images
__A = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
__A = np.array([0.4_883, 0.4_947, 0.4_980, 0.4_975, 0.4_982, 0.4_980, 0.5_000, 0.5_006, 0.4_972] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def lowercase_ ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
__A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
__A = init_image.resize((128, 128) )
__A = LMSDiscreteScheduler.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' , subfolder='scheduler' )
__A = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' , scheduler=_A , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_A )
__A = 'A fantasy landscape, trending on artstation'
__A = torch.manual_seed(0 )
__A = pipe(
prompt=_A , image=_A , guidance_scale=7.5 , num_inference_steps=20 , generator=_A , output_type='np' , )
__A = output.images
__A = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
__A = np.array(
[0.50_173_753, 0.50_223_356, 0.502_039, 0.50_233_036, 0.5_023_725, 0.5_022_601, 0.5_018_758, 0.50_234_085, 0.50_241_566] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
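# The scheduler swaps exercised in these tests follow the usual diffusers idiom:
# rebuilding the replacement from the current scheduler's config so the beta
# schedule and timestep settings carry over. A hedged sketch, assuming an
# already-loaded `pipe`:
#   from diffusers import EulerDiscreteScheduler
#   pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)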
| 161
|
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
a__ : Any = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE)
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE):
def __init__( self :List[str] , **_A :Any ) -> Tuple:
'''simple docstring'''
super().__init__(**_A )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self :Any , _A :Union[str, List[str], "Image", List["Image"]] , **_A :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
return super().__call__(_A , **_A )
    def _sanitize_parameters( self :Optional[int] , **kwargs :Dict ) -> Optional[Any]:
        '''simple docstring'''
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['candidate_labels'] = kwargs['candidate_labels']
        if "hypothesis_template" in kwargs:
            preprocess_params['hypothesis_template'] = kwargs['hypothesis_template']
        return preprocess_params, {}, {}
    def preprocess( self :Optional[int] , image :str , candidate_labels :str=None , hypothesis_template :Tuple="This is a photo of {}." ) -> Optional[int]:
        '''simple docstring'''
        image = load_image(image )
        inputs = self.image_processor(images=[image] , return_tensors=self.framework )
        inputs['candidate_labels'] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs['text_inputs'] = [text_inputs]
        return inputs
    def _forward( self :List[str] , model_inputs :Tuple ) -> Tuple:
        '''simple docstring'''
        candidate_labels = model_inputs.pop('candidate_labels' )
        text_inputs = model_inputs.pop('text_inputs' )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs )
        model_outputs = {
            'candidate_labels': candidate_labels,
            'logits': outputs.logits_per_image,
        }
        return model_outputs
    def postprocess( self :List[str] , model_outputs :Optional[int] ) -> Dict:
        '''simple docstring'''
        candidate_labels = model_outputs.pop('candidate_labels' )
        logits = model_outputs['logits'][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1 ).squeeze(-1 )
            scores = probs.tolist()
            if not isinstance(scores , list ):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits , axis=-1 )
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F'Unsupported framework: {self.framework}' )
        result = [
            {'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
        ]
        return result
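# Hedged usage sketch for this pipeline; the checkpoint name and image path are
# placeholders (any CLIP-style zero-shot checkpoint works):
#   from transformers import pipeline
#   classifier = pipeline('zero-shot-image-classification', model='openai/clip-vit-base-patch32')
#   classifier('cat.png', candidate_labels=['cat', 'dog'])
#   # -> [{'score': ..., 'label': 'cat'}, {'score': ..., 'label': 'dog'}]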
| 161
| 1
|
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester ( unittest.TestCase ):
    def __init__( self : Optional[Any] , parent : Optional[int] , batch_size : List[str]=7 , num_channels : int=3 , min_resolution : Optional[Any]=3_0 , max_resolution : Tuple=4_0_0 , do_resize : int=True , size : Union[str, Any]=None , do_rescale : Any=True , rescale_factor : Union[str, Any]=1 / 2_5_5 , do_normalize : Optional[Any]=True , image_mean : Union[str, Any]=[0.5, 0.5, 0.5] , image_std : Optional[Any]=[0.5, 0.5, 0.5] , do_pad : Any=True , ):
        '''simple docstring'''
        size = size if size is not None else {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
    def prepare_image_processor_dict( self : Dict ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
    def get_expected_values( self : List[Any] , image_inputs : Optional[Any] , batched : str=False ):
        '''simple docstring'''
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w )
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h )
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = DetrImageProcessor if is_vision_available() else None
    def setUp( self : Dict ):
        '''simple docstring'''
        self.image_processor_tester = DetrImageProcessingTester(self )
    @property
    def image_processor_dict( self : Union[str, Any] ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
a : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A , 'image_mean' ) )
self.assertTrue(hasattr(A , 'image_std' ) )
self.assertTrue(hasattr(A , 'do_normalize' ) )
self.assertTrue(hasattr(A , 'do_rescale' ) )
self.assertTrue(hasattr(A , 'rescale_factor' ) )
self.assertTrue(hasattr(A , 'do_resize' ) )
self.assertTrue(hasattr(A , 'size' ) )
self.assertTrue(hasattr(A , 'do_pad' ) )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
a : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3} )
self.assertEqual(image_processor.do_pad , A )
a : Any = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=A )
self.assertEqual(image_processor.size , {'shortest_edge': 4_2, 'longest_edge': 8_4} )
self.assertEqual(image_processor.do_pad , A )
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
a : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
a : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
a : List[Any] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a : List[str] = self.image_processor_tester.get_expected_values(A , batched=A )
a : Any = image_processing(A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
a : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for image in image_inputs:
self.assertIsInstance(A , np.ndarray )
# Test not batched input
a : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
a : int = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a : Tuple = image_processing(A , return_tensors='pt' ).pixel_values
a : Optional[Any] = self.image_processor_tester.get_expected_values(A , batched=A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
a : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test not batched input
a : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
a : List[Any] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a : List[str] = image_processing(A , return_tensors='pt' ).pixel_values
a : Dict = self.image_processor_tester.get_expected_values(A , batched=A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
a : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
a : List[Any] = json.loads(f.read() )
a : Tuple = {'image_id': 3_9_7_6_9, 'annotations': target}
# encode them
a : List[str] = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' )
a : Tuple = image_processing(images=A , annotations=A , return_tensors='pt' )
# verify pixel values
a : List[str] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['pixel_values'].shape , A )
a : Tuple = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , A , atol=1E-4 ) )
# verify area
a : Optional[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , A ) )
# verify boxes
a : Dict = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , A )
a : Union[str, Any] = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , A , atol=1E-3 ) )
# verify image_id
a : Any = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , A ) )
# verify is_crowd
a : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , A ) )
# verify class_labels
a : Dict = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , A ) )
# verify orig_size
a : str = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , A ) )
# verify size
a : int = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , A ) )
@slow
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
a : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
a : Dict = json.loads(f.read() )
a : Dict = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target}
a : Dict = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
a : Dict = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic' )
a : Union[str, Any] = image_processing(images=A , annotations=A , masks_path=A , return_tensors='pt' )
# verify pixel values
a : Tuple = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['pixel_values'].shape , A )
a : Any = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , A , atol=1E-4 ) )
# verify area
a : Optional[int] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , A ) )
# verify boxes
a : str = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , A )
a : Dict = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , A , atol=1E-3 ) )
# verify image_id
a : Any = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , A ) )
# verify is_crowd
a : str = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , A ) )
# verify class_labels
a : Dict = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , A ) )
# verify masks
a : Optional[Any] = 8_2_2_8_7_3
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , A )
# verify orig_size
a : Optional[int] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , A ) )
# verify size
a : Dict = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , A ) )
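# A worked instance of the shortest-edge rule in get_expected_values above
# (sample numbers chosen for illustration): a 30x40 image with shortest_edge=18
# maps the short side to 18 and scales the long side by the same factor,
# int(18 * 40 / 30) == 24, giving an expected (height, width) of (24, 18).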
| 357
|
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class snake_case ( unittest.TestCase ):
@slow
    def test_flax_xlm_roberta_base( self : Optional[Any] ):
        '''simple docstring'''
        model = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
        tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base' )
        text = 'The dog is cute and lives in the garden house'
        input_ids = jnp.array([tokenizer.encode(text )] )
        expected_output_shape = (1, 1_2, 7_6_8)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] )
        output = model(input_ids )['last_hidden_state']
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1E-3 ) )
| 186
| 0
|
'''simple docstring'''
from collections import deque
class lowerCAmelCase :
    def __init__( self : Tuple , process_name : str , arrival_time : int , burst_time : int ):
        """simple docstring"""
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class lowerCAmelCase :
    def __init__( self : int , number_of_queues : int , time_slices : list[int] , queue : deque[Process] , current_time : int , ):
        """simple docstring"""
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue( self : Optional[int] ):
        """simple docstring"""
        sequence = []
        for i in range(len(self.finish_queue ) ):
            sequence.append(self.finish_queue[i].process_name )
        return sequence
    def calculate_waiting_time( self : Optional[Any] , queue : list[Process] ):
        """simple docstring"""
        waiting_times = []
        for i in range(len(queue ) ):
            waiting_times.append(queue[i].waiting_time )
        return waiting_times
    def calculate_turnaround_time( self : Tuple , queue : list[Process] ):
        """simple docstring"""
        turnaround_times = []
        for i in range(len(queue ) ):
            turnaround_times.append(queue[i].turnaround_time )
        return turnaround_times
    def calculate_completion_time( self : Tuple , queue : list[Process] ):
        """simple docstring"""
        completion_times = []
        for i in range(len(queue ) ):
            completion_times.append(queue[i].stop_time )
        return completion_times
    def calculate_remaining_burst_time_of_processes( self : List[str] , queue : deque[Process] ):
        """simple docstring"""
        return [q.burst_time for q in queue]
    def update_waiting_time( self : str , process : Process ):
        """simple docstring"""
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served( self : str , ready_queue : deque[Process] ):
        """simple docstring"""
        finished = deque()  # sequence deque of finished process
        while len(ready_queue ) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp )
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp )
        self.finish_queue.extend(finished )  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin( self : str , ready_queue : deque[Process] , time_slice : int ):
        """simple docstring"""
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue ) ):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp )
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp )
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp )
        self.finish_queue.extend(finished )  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue( self : Optional[int] ):
        """simple docstring"""
        for i in range(self.number_of_queues - 1 ):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue , self.time_slices[i] )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue )
        return self.finish_queue
if __name__ == "__main__":
import doctest
    P1 = Process('''P1''', 0, 53)
    P2 = Process('''P2''', 0, 17)
    P3 = Process('''P3''', 0, 68)
    P4 = Process('''P4''', 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)
    doctest.testmod(extraglobs={'''queue''': deque([P1, P2, P3, P4])})
    P1 = Process('''P1''', 0, 53)
    P2 = Process('''P2''', 0, 17)
    P3 = Process('''P3''', 0, 68)
    P4 = Process('''P4''', 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'''waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}'''
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'''completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}'''
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'''turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}'''
)
# print sequence of finished processes
print(
F'''sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'''
)
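    # A minimal extra sketch (hypothetical process values, not part of the original
    # example) showing the FCFS stage in isolation: processes finish strictly in
    # arrival order when no round-robin queue precedes them.
    fcfs_queue = deque([Process('''A''', 0, 5), Process('''B''', 0, 3)])
    fcfs_mlfq = MLFQ(1, [], fcfs_queue, 0)
    print([p.process_name for p in fcfs_mlfq.first_come_first_served(fcfs_mlfq.ready_queue)])  # ['A', 'B']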
| 141
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCAmelCase = logging.get_logger(__name__)
def make_batched ( videos : List[Any] ):
    '''simple docstring'''
    if isinstance(videos, (list, tuple) ) and isinstance(videos[0], (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos, (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(F'''Could not make batched video from {videos}''' )
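# A small illustrative check of the shapes make_batched normalizes (a sketch; the
# dummy frame below is fabricated with PIL and is not part of this module):
#   frame = PIL.Image.new('RGB', (8, 8))
#   make_batched(frame)          -> [[frame]]          (single image becomes one 1-frame video)
#   make_batched([frame, frame]) -> [[frame, frame]]   (a list of frames is one video)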
class lowerCAmelCase ( A ):
lowerCAmelCase_ = ["pixel_values"]
def __init__( self : Union[str, Any] , __lowercase : bool = True , __lowercase : Dict[str, int] = None , __lowercase : PILImageResampling = PILImageResampling.BILINEAR , __lowercase : bool = True , __lowercase : Dict[str, int] = None , __lowercase : bool = True , __lowercase : Union[int, float] = 1 / 255 , __lowercase : bool = True , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , **__lowercase : Optional[Any] , ):
"""simple docstring"""
super().__init__(**__lowercase )
__lowercase =size if size is not None else {'shortest_edge': 224}
__lowercase =get_size_dict(__lowercase , default_to_square=__lowercase )
__lowercase =crop_size if crop_size is not None else {'height': 224, 'width': 224}
__lowercase =get_size_dict(__lowercase , param_name='crop_size' )
__lowercase =do_resize
__lowercase =size
__lowercase =do_center_crop
__lowercase =crop_size
__lowercase =resample
__lowercase =do_rescale
__lowercase =rescale_factor
__lowercase =do_normalize
__lowercase =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__lowercase =image_std if image_std is not None else IMAGENET_STANDARD_STD
def snake_case ( self : int , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : PILImageResampling = PILImageResampling.BILINEAR , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Optional[Any] , ):
"""simple docstring"""
__lowercase =get_size_dict(__lowercase , default_to_square=__lowercase )
if "shortest_edge" in size:
__lowercase =get_resize_output_image_size(__lowercase , size['shortest_edge'] , default_to_square=__lowercase )
elif "height" in size and "width" in size:
__lowercase =(size['height'], size['width'])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase )
def snake_case ( self : Dict , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : List[Any] , ):
"""simple docstring"""
__lowercase =get_size_dict(__lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(__lowercase , size=(size['height'], size['width']) , data_format=__lowercase , **__lowercase )
def snake_case ( self : str , __lowercase : np.ndarray , __lowercase : Union[int, float] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Any , ):
"""simple docstring"""
return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase )
def snake_case ( self : Dict , __lowercase : np.ndarray , __lowercase : Union[float, List[float]] , __lowercase : Union[float, List[float]] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Optional[Any] , ):
"""simple docstring"""
return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase )
def snake_case ( self : Optional[Any] , __lowercase : ImageInput , __lowercase : bool = None , __lowercase : Dict[str, int] = None , __lowercase : PILImageResampling = None , __lowercase : bool = None , __lowercase : Dict[str, int] = None , __lowercase : bool = None , __lowercase : float = None , __lowercase : bool = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[ChannelDimension] = ChannelDimension.FIRST , ):
"""simple docstring"""
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
__lowercase =to_numpy_array(__lowercase )
if do_resize:
__lowercase =self.resize(image=__lowercase , size=__lowercase , resample=__lowercase )
if do_center_crop:
__lowercase =self.center_crop(__lowercase , size=__lowercase )
if do_rescale:
__lowercase =self.rescale(image=__lowercase , scale=__lowercase )
if do_normalize:
__lowercase =self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase )
__lowercase =to_channel_dimension_format(__lowercase , __lowercase )
return image
def snake_case ( self : Union[str, Any] , __lowercase : ImageInput , __lowercase : bool = None , __lowercase : Dict[str, int] = None , __lowercase : PILImageResampling = None , __lowercase : bool = None , __lowercase : Dict[str, int] = None , __lowercase : bool = None , __lowercase : float = None , __lowercase : bool = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[str, TensorType]] = None , __lowercase : ChannelDimension = ChannelDimension.FIRST , **__lowercase : Tuple , ):
"""simple docstring"""
__lowercase =do_resize if do_resize is not None else self.do_resize
__lowercase =resample if resample is not None else self.resample
__lowercase =do_center_crop if do_center_crop is not None else self.do_center_crop
__lowercase =do_rescale if do_rescale is not None else self.do_rescale
__lowercase =rescale_factor if rescale_factor is not None else self.rescale_factor
__lowercase =do_normalize if do_normalize is not None else self.do_normalize
__lowercase =image_mean if image_mean is not None else self.image_mean
__lowercase =image_std if image_std is not None else self.image_std
__lowercase =size if size is not None else self.size
__lowercase =get_size_dict(__lowercase , default_to_square=__lowercase )
__lowercase =crop_size if crop_size is not None else self.crop_size
__lowercase =get_size_dict(__lowercase , param_name='crop_size' )
if not valid_images(__lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
__lowercase =make_batched(__lowercase )
__lowercase =[
[
self._preprocess_image(
image=__lowercase , do_resize=__lowercase , size=__lowercase , resample=__lowercase , do_center_crop=__lowercase , crop_size=__lowercase , do_rescale=__lowercase , rescale_factor=__lowercase , do_normalize=__lowercase , image_mean=__lowercase , image_std=__lowercase , data_format=__lowercase , )
for img in video
]
for video in videos
]
__lowercase ={'pixel_values': videos}
return BatchFeature(data=__lowercase , tensor_type=__lowercase )
| 141
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_gpt_sw3'''] = ['''GPTSw3Tokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
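# Hedged usage note: with the lazy module installed above, the heavyweight import
# only happens on first attribute access (and requires sentencepiece), e.g.:
#   from transformers.models.gpt_sw3 import GPTSw3Tokenizer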
| 365
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __magic_name__ ( __UpperCAmelCase , unittest.TestCase ):
__A : Tuple = KandinskyVaaPipeline
__A : Any = [
"image_embeds",
"negative_image_embeds",
]
__A : Tuple = ["image_embeds", "negative_image_embeds"]
__A : Tuple = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
__A : Union[str, Any] = False
@property
def __snake_case ( self : str ):
'''simple docstring'''
return 3_2
@property
def __snake_case ( self : Any ):
'''simple docstring'''
return 3_2
@property
def __snake_case ( self : List[Any] ):
'''simple docstring'''
return self.time_input_dim
@property
def __snake_case ( self : Any ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
return 1_0_0
@property
def __snake_case ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase :Optional[Any] = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
lowercase :int = UNetaDConditionModel(**snake_case__ )
return model
@property
def __snake_case ( self : Dict ):
'''simple docstring'''
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __snake_case ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase :Tuple = VQModel(**self.dummy_movq_kwargs )
return model
def __snake_case ( self : List[Any] ):
'''simple docstring'''
lowercase :Optional[Any] = self.dummy_unet
lowercase :List[Any] = self.dummy_movq
lowercase :Optional[Any] = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''linear''' , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=snake_case__ , )
lowercase :str = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __snake_case ( self : str , snake_case__ : Any , snake_case__ : str=0 ):
'''simple docstring'''
lowercase :Tuple = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
lowercase :Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
snake_case__ )
if str(snake_case__ ).startswith('''mps''' ):
lowercase :Optional[int] = torch.manual_seed(snake_case__ )
else:
lowercase :Any = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
lowercase :List[Any] = {
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def __snake_case ( self : List[str] ):
'''simple docstring'''
lowercase :List[Any] = '''cpu'''
lowercase :Tuple = self.get_dummy_components()
lowercase :Any = self.pipeline_class(**snake_case__ )
lowercase :List[str] = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowercase :Optional[Any] = pipe(**self.get_dummy_inputs(snake_case__ ) )
lowercase :str = output.images
lowercase :Dict = pipe(
**self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0]
lowercase :Any = image[0, -3:, -3:, -1]
lowercase :Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowercase :List[Any] = np.array(
[0.6_23_79_76, 1.0, 0.36_44_13_32, 1.0, 0.70_63_96_34, 0.29_87_71_86, 0.85_65_21_25, 0.5_21_68_43, 0.54_45_40_46] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
def __snake_case ( self : int ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self : Any ):
'''simple docstring'''
lowercase :Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy''' )
lowercase :int = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(snake_case__ )
lowercase :Tuple = KandinskyVaaPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa )
lowercase :str = pipeline.to(snake_case__ )
pipeline.set_progress_bar_config(disable=snake_case__ )
lowercase :int = '''red cat, 4k photo'''
lowercase :str = torch.Generator(device='''cuda''' ).manual_seed(0 )
lowercase , lowercase :Union[str, Any] = pipe_prior(
snake_case__ , generator=snake_case__ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
lowercase :Tuple = torch.Generator(device='''cuda''' ).manual_seed(0 )
lowercase :List[Any] = pipeline(
image_embeds=snake_case__ , negative_image_embeds=snake_case__ , generator=snake_case__ , num_inference_steps=1_0_0 , output_type='''np''' , )
lowercase :Tuple = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(snake_case__ , snake_case__ )
| 172
| 0
|
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
lowerCAmelCase__ = logging.get_logger(__name__)
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter (formatter_cls : type , format_type : Optional[str] , aliases : Optional[List[str]] = None , ):
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type] ):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter (unavailable_error : Exception , format_type : Optional[str] , aliases : Optional[List[str]] = None ):
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type] ):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
    _torch_error = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
    _tf_error = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
    _jax_error = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def get_format_type_from_alias (format_type : Optional[str] ):
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter (format_type : Optional[str] , **format_kwargs : List[Any] ):
    format_type = get_format_type_from_alias(format_type )
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs )
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'" )
| 11
|
def _UpperCAmelCase (input_str : str , use_pascal : bool = False ):
    if not isinstance(input_str , str ):
        msg = f"Expected string as input, found {type(input_str )}"
        raise ValueError(msg )
    if not isinstance(use_pascal , bool ):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal )}"
        raise ValueError(msg )
    words = input_str.split("_" )
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
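    # Illustrative calls (sample values chosen here, not part of the original file):
    # the default keeps the first word lowercase; use_pascal capitalizes it too.
    print(_UpperCAmelCase('''some_random_string'''))        # someRandomString
    print(_UpperCAmelCase('''some_random_string''', True))  # SomeRandomString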
| 11
| 1
|
from math import factorial
class Dual :
    def __init__( self , real , rank ) -> Tuple:
        '''simple docstring'''
        self.real = real
        if isinstance(rank , int ):
            self.duals = [1] * rank
        else:
            self.duals = rank
    def __repr__( self ) -> Union[str, Any]:
        '''simple docstring'''
        return (
            F'''{self.real}+'''
            F'''{"+".join(str(dual )+"E"+str(n+1 ) for n, dual in enumerate(self.duals ) )}'''
        )
    def reduce( self ) -> int:
        '''simple docstring'''
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1 )
        return Dual(self.real , cur )
    def __add__( self , other ) -> Any:
        '''simple docstring'''
        if not isinstance(other , Dual ):
            return Dual(self.real + other , self.duals )
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual ) > len(o_dual ):
            o_dual.extend([1] * (len(s_dual ) - len(o_dual )) )
        elif len(s_dual ) < len(o_dual ):
            s_dual.extend([1] * (len(o_dual ) - len(s_dual )) )
        new_duals = []
        for i in range(len(s_dual ) ):
            new_duals.append(s_dual[i] + o_dual[i] )
        return Dual(self.real + other.real , new_duals )
    __radd__ = __add__
    def __sub__( self , other ) -> Optional[Any]:
        '''simple docstring'''
        return self + other * -1
    def __mul__( self , other ) -> int:
        '''simple docstring'''
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other )
            return Dual(self.real * other , new_duals )
        new_duals = [0] * (len(self.duals ) + len(other.duals ) + 1)
        for i, item in enumerate(self.duals ):
            for j, jtem in enumerate(other.duals ):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals ) ):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals ) ):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real , new_duals )
    __rmul__ = __mul__
    def __truediv__( self , other ) -> Optional[int]:
        '''simple docstring'''
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other )
            return Dual(self.real / other , new_duals )
        raise ValueError
    def __floordiv__( self , other ) -> List[str]:
        '''simple docstring'''
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other )
            return Dual(self.real // other , new_duals )
        raise ValueError
    def __pow__( self , n ) -> Optional[Any]:
        '''simple docstring'''
        if n < 0 or isinstance(n , float ):
            raise ValueError("power must be a positive integer" )
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1 ):
            x *= self
        return x
def SCREAMING_SNAKE_CASE ( func , position , order ) -> Optional[Any]:
    if not callable(func ):
        raise ValueError("differentiate() requires a function as input for func" )
    if not isinstance(position , (float, int) ):
        raise ValueError("differentiate() requires a float as input for position" )
    if not isinstance(order , int ):
        raise ValueError("differentiate() requires an int as input for order" )
    d = Dual(position , 1 )
    result = func(d )
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order )
if __name__ == "__main__":
import doctest
doctest.testmod()
    def f(y ):
        return y**2 * y**4

    print(SCREAMING_SNAKE_CASE(f, 9, 2))
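    # Analytic cross-check (added for illustration): f(y) = y**6, so the second
    # derivative is 30 * y**4 and the call above should print 30 * 9**4 == 196830.
    assert SCREAMING_SNAKE_CASE(f, 9, 2) == 30 * 9**4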
| 180
|
from math import isqrt
def is_prime ( number: int ) -> bool:
    return all(number % divisor != 0 for divisor in range(2 , isqrt(number) + 1))
def solution ( max_prime: int = 10**6 ) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(F'{solution() = }')
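    # The candidates tested above are differences of consecutive cubes,
    # (k + 1)**3 - k**3 == 3*k*k + 3*k + 1; a small illustrative check:
    print([(k + 1) ** 3 - k ** 3 for k in range(1, 6)])  # [7, 19, 37, 61, 91]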
| 180
| 1
|
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
__A = False
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ):
lowercase__: Dict = VersatileDiffusionTextToImagePipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: List[str] = '''A painting of a squirrel eating a burger '''
lowercase__: str = torch.manual_seed(0 )
lowercase__: Union[str, Any] = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_UpperCAmelCase )
lowercase__: Optional[Any] = VersatileDiffusionTextToImagePipeline.from_pretrained(_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: Optional[int] = generator.manual_seed(0 )
lowercase__: List[str] = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def _snake_case ( self ):
lowercase__: Dict = VersatileDiffusionTextToImagePipeline.from_pretrained(
'''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: Tuple = '''A painting of a squirrel eating a burger '''
lowercase__: Optional[Any] = torch.manual_seed(0 )
lowercase__: Tuple = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
lowercase__: Union[str, Any] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__: Any = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 177
|
"""simple docstring"""
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch ( tf_checkpoint_path , config_file , pytorch_dump_path , base_model ) -> List[Any]:
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file )
    print(F"""Building PyTorch model from configuration: {config}""" )
    model = FunnelBaseModel(config ) if base_model else FunnelModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
__A = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
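# Usage sketch (the script and file names below are illustrative placeholders,
# not from the source):
#
#     python convert_funnel_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./funnel/model.ckpt \
#         --config_file ./funnel/config.json \
#         --pytorch_dump_path ./funnel/pytorch_model.bin
#
# Pass --base_model to export only the encoder blocks without the decoder.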
"""Ternary search over a sorted list, in iterative and recursive variants.

Below a fixed size threshold the search falls back to a linear scan.
"""
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linear search over array[left:right]; returns the index of target or -1."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search; returns the index of target or -1."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1

        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search; returns the index of target or -1."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_ite != -1:
        print(f"Iterative search: {target} found at position: {result_ite}")
        print(f"Recursive search: {target} found at position: {result_rec}")
    else:
        print("Not found")
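# Worked example (sample values are illustrative): with the default precision
# of 10, a short list goes straight to the linear fallback, so:
#
#     >>> sample = [1, 3, 5, 7, 9, 11]
#     >>> ite_ternary_search(sample, 7)
#     3
#     >>> rec_ternary_search(0, len(sample) - 1, sample, 8)
#     -1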
"""Matrix-chain multiplication: dynamic programming solution (CLRS, chapter 15)."""
import sys


def matrix_chain_order(array):
    """Return (cost table, split table) for the chain of dimensions in `array`."""
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    """Recursively print the optimal parenthesization encoded in the split table."""
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, sol = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(sol, 1, n - 1)


if __name__ == "__main__":
    main()
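# Sanity check (illustrative): the dimensions in main() are the classic CLRS
# instance, whose known optimum is 15125 scalar multiplications:
#
#     matrix, sol = matrix_chain_order([30, 35, 15, 5, 10, 20, 25])
#     assert matrix[1][6] == 15125  # parenthesization: ((A1 (A2 A3)) ((A4 A5) A6))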
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of an edge label to the child node
        self.nodes: dict[str, RadixNode] = {}

        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Compute the common substring of the node prefix and a word.

        Returns (common substring, remaining prefix, remaining word).
        """
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break

            x += 1

        return self.prefix[:x], self.prefix[x:], word[x:]
    def insert_many(self, words: list[str]) -> None:
        """Insert many words in the tree."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a word into the tree."""
        # Case 1: The word is the prefix of the node
        # Solution: We set the current node as a leaf
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)
    def find(self, word: str) -> bool:
        """Return whether the word is in the tree."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)
    def delete(self, word: str) -> bool:
        """Delete the word from the tree; return whether it was found."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes

                    return True
    def print_tree(self, height: int = 0) -> None:
        """Print the tree, one node per line, depth shown as leading dashes."""
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")

        for value in self.nodes.values():
            value.print_tree(height + 1)
def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
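# Example session (the words are illustrative, not from the source):
#
#     root = RadixNode()
#     root.insert_many(["romane", "romanus", "romulus"])
#     root.find("romanus")  # True
#     root.find("roman")    # False: "roman" exists only as a shared edge prefix
#
# Inserting "romanus" splits the original "romane" edge into "roman" -> "e"/"us";
# this edge compression is what distinguishes a radix tree from a plain trie.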
"""Project Euler 144: count reflections of a laser beam inside the ellipse 4x^2 + y^2 = 100."""
from math import isclose, sqrt


def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    # normal_gradient = gradient of the normal at the point of incidence
    normal_gradient = point_y / 4 / point_x
    # s2 and c2 stand in for sin(2*theta) and cos(2*theta) with tan(theta) = normal_gradient
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point; keep the other one
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
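# Note on the reflection step in next_point(): for a normal of gradient m,
# s2 = 2m / (1 + m^2) and c2 = (1 - m^2) / (1 + m^2) are sin(2θ) and cos(2θ)
# with θ = arctan(m) (the tangent half-angle identities). The outgoing
# gradient is therefore tan(2θ - φ) for an incoming gradient tan(φ), i.e. the
# beam direction mirrored about the normal.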
"""Backtracking search for a Hamiltonian cycle in an undirected graph.

The graph is an adjacency matrix; the returned path starts and ends at the
same vertex, or is empty when no Hamiltonian cycle exists.
"""


def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
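# Example (the graph is illustrative): a 5-vertex graph with a Hamiltonian cycle.
#
#     graph = [
#         [0, 1, 0, 1, 0],
#         [1, 0, 1, 1, 1],
#         [0, 1, 0, 0, 1],
#         [1, 1, 0, 0, 1],
#         [0, 1, 1, 1, 0],
#     ]
#     hamilton_cycle(graph)  # -> [0, 1, 2, 4, 3, 0]
#
# Removing the 3-4 edge leaves no Hamiltonian cycle and the function returns [].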
"""Tests for accelerate's dataloader sharding utilities."""
import random
import unittest

from torch.utils.data import BatchSampler, DataLoader, IterableDataset

from accelerate import Accelerator
from accelerate.data_loader import (
    BatchSamplerShard,
    DataLoaderDispatcher,
    DataLoaderShard,
    IterableDatasetShard,
    SkipBatchSampler,
    SkipDataLoader,
    skip_first_batches,
)


class RandomIterableDataset(IterableDataset):
    # A dataset of random length, reproducible through random.seed(...).
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            # Each shard reports its length via __len__ without materializing batches.
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
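# Quick illustration of the sharding rule the tests above exercise (a sketch
# following the same padding pattern, not an additional test):
#
#     sampler = BatchSampler(range(8), batch_size=3, drop_last=False)
#     shards = [BatchSamplerShard(sampler, 2, i) for i in range(2)]
#     list(shards[0])  # [[0, 1, 2], [6, 7, 0]]
#     list(shards[1])  # [[3, 4, 5], [1, 2, 3]]
#
# With even_batches=True (the default), incomplete tail batches are completed
# by cycling back to the start of the dataset so every process sees the same
# number of equally sized batches.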
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def _lowerCAmelCase ( lowercase_ ):
UpperCAmelCase = []
UpperCAmelCase = 11
UpperCAmelCase = int('1' + '0' * digit_len )
for num in range(lowercase_ , lowercase_ ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(lowercase_ , lowercase_ ):
solutions.append(F"""{num}/{den}""" )
den += 1
num += 1
UpperCAmelCase = 10
return solutions
def _lowerCAmelCase ( lowercase_ = 2 ):
UpperCAmelCase = 1.0
for fraction in fraction_list(lowercase_ ):
UpperCAmelCase = Fraction(lowercase_ )
result *= frac.denominator / frac.numerator
return int(lowercase_ )
if __name__ == "__main__":
print(solution())
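# For the two-digit case, solution() finds the four classic non-trivial
# fractions 16/64, 19/95, 26/65 and 49/98; their product is 1/100, so the
# function returns 100.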
"""simple docstring"""
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
_a : Any= "3"
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
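# How the lazy pattern above behaves (sketch): importing the package is cheap
# because sys.modules[__name__] is replaced by a _LazyModule; the heavy
# submodules listed in _import_structure are only imported the first time one
# of their attributes (e.g. LayoutLMv2Model) is actually accessed.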
import json
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402


SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()

            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/test_dynamic_feature_extractor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            _ = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")
    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)
            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)
            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
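# The registration pattern exercised above, in isolation (sketch):
#
#     AutoConfig.register("custom", CustomConfig)
#     AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
#     extractor = AutoFeatureExtractor.from_pretrained(some_local_dir)
#
# A locally registered class wins over code fetched from the Hub unless
# trust_remote_code=True explicitly asks for the remote implementation.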
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch

from transformers.testing_utils import TestCasePlus, get_gpu_count, slow


SRC_DIRS = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        "text-classification",
        "language-modeling",
        "summarization",
        "token-classification",
        "question-answering",
    ]
]
sys.path.extend(SRC_DIRS)


if SRC_DIRS is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    import run_t5_mlm_flax


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --eval_steps=2
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_clm_flax.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)
    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_summarization.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --test_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=8
            --do_train
            --do_eval
            --do_predict
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            """.split()

        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)
    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --logging_steps 2 --eval_steps 2
            --do_train
            --do_eval
            --num_train_epochs=1
            """.split()

        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)
    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_t5_mlm_flax.py
            --model_name_or_path t5-small
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)
    @slow
    def test_run_ner(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_flax_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --logging_steps 2 --eval_steps 2
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)
    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_qa.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=2
            --do_train
            --do_eval
            --logging_steps 2 --eval_steps 2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            """.split()

        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
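# All of these tests follow the same pattern: build a CLI argument list, patch
# sys.argv, call the example script's main(), then assert on the metrics JSON
# the script wrote, e.g. (sketch): get_results(tmp_dir)["eval_accuracy"].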
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
    "BridgeTower/bridgetower-base-itm-mlm": (
        "https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
    ),
}


class BridgeTowerVisionConfig(PretrainedConfig):
    model_type = "bridgetower_vision_model"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_channels=3,
        patch_size=16,
        image_size=288,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        stop_gradient=False,
        share_layernorm=True,
        remove_last_layer=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerTextConfig(PretrainedConfig):
    model_type = "bridgetower_text_model"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        initializer_factor=1,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerConfig(PretrainedConfig):
    model_type = "bridgetower"

    def __init__(
        self,
        share_cross_modal_transformer_layers=True,
        hidden_act="gelu",
        hidden_size=768,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        share_link_tower_layers=False,
        link_tower_type="add",
        num_attention_heads=12,
        num_hidden_layers=6,
        tie_word_embeddings=False,
        init_layernorm_from_vision_encoder=False,
        text_config=None,
        vision_config=None,
        **kwargs,
    ):
        # TODO: remove this once the Hub files are updated.
        _ = kwargs.pop("text_config_dict", None)
        _ = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")

        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def from_text_vision_configs(
        cls, text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
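# Typical composition (sketch; values are the defaults declared above):
#
#     text_config = BridgeTowerTextConfig(vocab_size=50265)
#     vision_config = BridgeTowerVisionConfig(image_size=288)
#     config = BridgeTowerConfig.from_text_vision_configs(text_config, vision_config)
#     config.to_dict()["model_type"]  # "bridgetower"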
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


# Resizes the shortest edge, center-crops, rescales to [0, 1] and normalizes
# with ImageNet statistics.
class ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        # Resize so the shortest edge matches size["shortest_edge"], keeping the aspect ratio.
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
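# Usage sketch (the input image is illustrative; defaults as declared above):
#
#     from PIL import Image
#     processor = ImageProcessor()
#     batch = processor(images=Image.open("cat.png"), return_tensors="np")
#     batch["pixel_values"].shape  # (1, 3, 224, 224) after resize + center crop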
| 352
|
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
class lowercase_ ( A ):
"""simple docstring"""
lowerCamelCase_ = '''linear'''
lowerCamelCase_ = '''cosine'''
lowerCamelCase_ = '''cosine_with_restarts'''
lowerCamelCase_ = '''polynomial'''
lowerCamelCase_ = '''constant'''
lowerCamelCase_ = '''constant_with_warmup'''
lowerCamelCase_ = '''piecewise_constant'''
def SCREAMING_SNAKE_CASE_ ( __A : Optimizer , __A : int = -1 ) -> Optional[int]:
return LambdaLR(__A , lambda __A : 1 , last_epoch=__A )
def SCREAMING_SNAKE_CASE_ ( __A : Optimizer , __A : int , __A : int = -1 ) -> Dict:
def lr_lambda(__A : int ):
if current_step < num_warmup_steps:
return float(__A ) / float(max(1.0 , __A ) )
return 1.0
return LambdaLR(__A , __A , last_epoch=__A )
def SCREAMING_SNAKE_CASE_ ( __A : Optimizer , __A : str , __A : int = -1 ) -> Tuple:
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = step_rules.split("," )
for rule_str in rule_list[:-1]:
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = rule_str.split(":" )
_SCREAMING_SNAKE_CASE = int(__A )
_SCREAMING_SNAKE_CASE = float(__A )
_SCREAMING_SNAKE_CASE = value
_SCREAMING_SNAKE_CASE = float(rule_list[-1] )
def create_rules_function(__A : Tuple , __A : List[Any] ):
def rule_func(__A : int ) -> float:
_SCREAMING_SNAKE_CASE = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(__A ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
_SCREAMING_SNAKE_CASE = create_rules_function(__A , __A )
return LambdaLR(__A , __A , last_epoch=__A )
def SCREAMING_SNAKE_CASE_ ( __A : Union[str, Any] , __A : Optional[Any] , __A : List[str] , __A : Union[str, Any]=-1 ) -> str:
def lr_lambda(__A : int ):
if current_step < num_warmup_steps:
return float(__A ) / float(max(1 , __A ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(__A , __A , __A )
def SCREAMING_SNAKE_CASE_ ( __A : Optimizer , __A : int , __A : int , __A : float = 0.5 , __A : int = -1 ) -> Any:
def lr_lambda(__A : int ):
if current_step < num_warmup_steps:
return float(__A ) / float(max(1 , __A ) )
_SCREAMING_SNAKE_CASE = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(__A ) * 2.0 * progress )) )
return LambdaLR(__A , __A , __A )
def SCREAMING_SNAKE_CASE_ ( __A : Optimizer , __A : int , __A : int , __A : int = 1 , __A : int = -1 ) -> str:
def lr_lambda(__A : Optional[int] ):
if current_step < num_warmup_steps:
return float(__A ) / float(max(1 , __A ) )
_SCREAMING_SNAKE_CASE = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(__A ) * progress) % 1.0) )) )
return LambdaLR(__A , __A , __A )
def SCREAMING_SNAKE_CASE_ ( __A : Optional[int] , __A : Optional[Any] , __A : List[str] , __A : Any=1e-7 , __A : Optional[Any]=1.0 , __A : List[Any]=-1 ) -> Tuple:
_SCREAMING_SNAKE_CASE = optimizer.defaults["lr"]
if not (lr_init > lr_end):
raise ValueError(f"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(__A : int ):
if current_step < num_warmup_steps:
return float(__A ) / float(max(1 , __A ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
_SCREAMING_SNAKE_CASE = lr_init - lr_end
_SCREAMING_SNAKE_CASE = num_training_steps - num_warmup_steps
_SCREAMING_SNAKE_CASE = 1 - (current_step - num_warmup_steps) / decay_steps
_SCREAMING_SNAKE_CASE = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(__A , __A , __A )
lowerCamelCase_ = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def SCREAMING_SNAKE_CASE_ ( __A : Union[str, SchedulerType] , __A : Optimizer , __A : Optional[str] = None , __A : Optional[int] = None , __A : Optional[int] = None , __A : int = 1 , __A : float = 1.0 , __A : int = -1 , ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = SchedulerType(__A )
_SCREAMING_SNAKE_CASE = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(__A , last_epoch=__A )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(__A , step_rules=__A , last_epoch=__A )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(__A , num_warmup_steps=__A , last_epoch=__A )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
__A , num_warmup_steps=__A , num_training_steps=__A , num_cycles=__A , last_epoch=__A , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
__A , num_warmup_steps=__A , num_training_steps=__A , power=__A , last_epoch=__A , )
return schedule_func(
__A , num_warmup_steps=__A , num_training_steps=__A , last_epoch=__A )
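# Editor's sketch (not part of the original file): every helper above reduces
# to torch's LambdaLR with a step -> multiplier function. This standalone demo
# builds a linear warmup-then-decay multiplier by hand, so it runs even though
# the definitions above all share one obfuscated name.
import torch
from torch.optim.lr_scheduler import LambdaLR

demo_model = torch.nn.Linear(4, 2)
demo_opt = torch.optim.SGD(demo_model.parameters(), lr=0.1)
warmup, total = 10, 100

def demo_lr_lambda(step: int) -> float:
    if step < warmup:
        return step / max(1, warmup)                          # linear warmup to 1.0
    return max(0.0, (total - step) / max(1, total - warmup))  # linear decay to 0.0

demo_sched = LambdaLR(demo_opt, demo_lr_lambda)
for _ in range(3):
    demo_opt.step()
    demo_sched.step()
print(demo_sched.get_last_lr())  # ~[0.03]: base lr 0.1 * multiplier 3/10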
| 111
| 0
|
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = 1
SCREAMING_SNAKE_CASE : Dict = 2
while i * i <= n:
SCREAMING_SNAKE_CASE : Optional[Any] = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = 1
SCREAMING_SNAKE_CASE : Any = 1
while True:
i += 1
t_num += i
if count_divisors(__snake_case ) > 500:
break
return t_num
if __name__ == "__main__":
print(solution())
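# Editor's demo (assuming the divisor counter keeps the name `count_divisors`
# used inside solution's loop): it multiplies (multiplicity + 1) over the prime
# factorization, e.g. 28 = 2**2 * 7 has (2 + 1) * (1 + 1) = 6 divisors.
assert count_divisors(28) == 6
assert count_divisors(76_576_500) == 576  # the triangular number solution() returns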
| 313
|
def UpperCAmelCase_ ( __snake_case ) -> str:
"""simple docstring"""
_lowercase =0
# if input_string is "aba" than new_input_string become "a|b|a"
_lowercase =''''''
_lowercase =''''''
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(__snake_case ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
    # store the start and end of the previously explored furthest-ending
    # palindromic substring
_lowercase , _lowercase =0, 0
# length[i] shows the length of palindromic substring with center i
_lowercase =[1 for i in range(len(__snake_case ) )]
# for each character in new_string find corresponding palindromic string
_lowercase =0
for j in range(len(__snake_case ) ):
_lowercase =1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(__snake_case )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
_lowercase =2 * k - 1
        # does this palindrome end after the previously explored end (that is, r)?
        # if yes, update l and r to the bounds of this palindrome
if j + k - 1 > r:
_lowercase =j - k + 1 # noqa: E741
_lowercase =j + k - 1
# update max_length and start position
if max_length < length[j]:
_lowercase =length[j]
_lowercase =j
# create that string
_lowercase =new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
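# Editor's cross-check: the routine above is Manacher's algorithm (longest
# palindromic substring in O(n)); the "|" separators turn even-length
# palindromes into odd-length ones so a single center loop handles both.
# A standalone O(n^3) reference for validating small inputs:
def longest_palindrome_bruteforce(s: str) -> str:
    best = ""
    for i in range(len(s)):
        for j in range(i, len(s)):
            candidate = s[i : j + 1]
            if candidate == candidate[::-1] and len(candidate) > len(best):
                best = candidate
    return best

assert longest_palindrome_bruteforce("abbbaba") == "abbba"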
| 5
| 0
|
'''simple docstring'''
from __future__ import annotations
from random import choice
def UpperCamelCase__ ( lowerCAmelCase ):
"""simple docstring"""
return choice(lowerCAmelCase )
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase = random_pivot(lowerCAmelCase )
# partition based on pivot
# linear time
_lowerCAmelCase = [e for e in lst if e < pivot]
_lowerCAmelCase = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(lowerCAmelCase ) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(lowerCAmelCase ) < k - 1:
return kth_number(lowerCAmelCase , k - len(lowerCAmelCase ) - 1 )
# pivot is in elements smaller than k
else:
return kth_number(lowerCAmelCase , lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
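# Editor's demo (assuming the function keeps the name `kth_number` used in its
# own recursive calls): randomized quickselect, expected O(n). The strict </>
# partition discards duplicates of the pivot, so inputs are assumed distinct.
assert kth_number([2, 1, 3, 4, 5], 3) == 3
assert kth_number([25, 21, 98, 100, 76, 22, 43, 60, 89, 87], 4) == 43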
| 220
|
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase :
@staticmethod
def lowercase__ ( *__snake_case : Optional[Any] , **__snake_case : Any ) -> Tuple:
pass
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
_lowercase: Union[str, Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def lowercase__ ( self : List[str] , __snake_case : Optional[Any] , __snake_case : str , __snake_case : List[str] ) -> int:
_lowerCAmelCase = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" )
_lowerCAmelCase = [
{
"""image""": Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""question""": """How many cats are there?""",
},
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""question""": """How many cats are there?""",
},
]
return vqa_pipeline, examples
def lowercase__ ( self : Any , __snake_case : List[Any] , __snake_case : List[Any] ) -> Union[str, Any]:
_lowerCAmelCase = vqa_pipeline(__snake_case , top_k=1 )
self.assertEqual(
__snake_case , [
[{"""score""": ANY(__snake_case ), """answer""": ANY(__snake_case )}],
[{"""score""": ANY(__snake_case ), """answer""": ANY(__snake_case )}],
] , )
@require_torch
def lowercase__ ( self : str ) -> int:
_lowerCAmelCase = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" )
_lowerCAmelCase = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
_lowerCAmelCase = """How many cats are there?"""
_lowerCAmelCase = vqa_pipeline(image=__snake_case , question="""How many cats are there?""" , top_k=2 )
self.assertEqual(
__snake_case , [{"""score""": ANY(__snake_case ), """answer""": ANY(__snake_case )}, {"""score""": ANY(__snake_case ), """answer""": ANY(__snake_case )}] )
_lowerCAmelCase = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
__snake_case , [{"""score""": ANY(__snake_case ), """answer""": ANY(__snake_case )}, {"""score""": ANY(__snake_case ), """answer""": ANY(__snake_case )}] )
@slow
@require_torch
def lowercase__ ( self : List[Any] ) -> List[str]:
_lowerCAmelCase = pipeline("""visual-question-answering""" , model="""dandelin/vilt-b32-finetuned-vqa""" )
_lowerCAmelCase = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
_lowerCAmelCase = """How many cats are there?"""
_lowerCAmelCase = vqa_pipeline(image=__snake_case , question=__snake_case , top_k=2 )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [{"""score""": 0.87_99, """answer""": """2"""}, {"""score""": 0.2_96, """answer""": """1"""}] )
_lowerCAmelCase = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [{"""score""": 0.87_99, """answer""": """2"""}, {"""score""": 0.2_96, """answer""": """1"""}] )
_lowerCAmelCase = vqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [[{"""score""": 0.87_99, """answer""": """2"""}, {"""score""": 0.2_96, """answer""": """1"""}]] * 2 , )
@require_tf
@unittest.skip("""Visual question answering not implemented in TF""" )
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
pass
| 220
| 1
|
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ = 0,snake_case_ = 0 ):
_A : Optional[int] = right or len(snake_case_ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(snake_case_,snake_case_,left + 1,right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
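# Editor's demo (assuming the function keeps the name `search` used in its own
# recursive call): a two-ended linear search that compares the leftmost and
# rightmost elements and recurses inward, returning -1 when the key is absent.
# Note that `right or len(list_data) - 1` re-expands the window if a recursive
# call ever reaches right == 0; it still terminates because left only grows.
assert search([1, 2, 4, 8, 16], 8) == 3
assert search([1, 2, 4, 8, 16], 5) == -1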
| 26
|
from ...processing_utils import ProcessorMixin
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = ["""image_processor""", """feature_extractor"""]
UpperCAmelCase_ : Optional[int] = """TvltImageProcessor"""
UpperCAmelCase_ : Optional[int] = """TvltFeatureExtractor"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Optional[int]:
super().__init__(image_processor=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = image_processor
lowerCAmelCase = feature_extractor
def __call__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) ->List[Any]:
if images is None and audio is None:
raise ValueError('''You need to specify either an `images` or `audio` input to process.''' )
lowerCAmelCase = None
if images is not None:
lowerCAmelCase = self.image_processor(__SCREAMING_SNAKE_CASE , mask_pixel=__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if images_mixed is not None:
lowerCAmelCase = self.image_processor(__SCREAMING_SNAKE_CASE , is_mixed=__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if audio is not None:
lowerCAmelCase = self.feature_extractor(
__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , mask_audio=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowerCAmelCase = {}
if audio is not None:
output_dict.update(__SCREAMING_SNAKE_CASE )
if images is not None:
output_dict.update(__SCREAMING_SNAKE_CASE )
if images_mixed_dict is not None:
output_dict.update(__SCREAMING_SNAKE_CASE )
return output_dict
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
lowerCAmelCase = self.image_processor.model_input_names
lowerCAmelCase = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
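# Editor's usage sketch: assuming the class above is transformers'
# TvltProcessor (it names TvltImageProcessor and TvltFeatureExtractor as its
# components), a combined audio/video call would look roughly like:
#
#   processor = TvltProcessor(image_processor, feature_extractor)
#   inputs = processor(images=video_frames, audio=waveform, sampling_rate=44100)
#   # -> one dict merging pixel and audio features, as in __call__ above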
| 338
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase=13 ,_lowerCamelCase=32 ,_lowerCamelCase=3 ,_lowerCamelCase=4 ,_lowerCamelCase=[10, 20, 30, 40] ,_lowerCamelCase=[2, 2, 3, 2] ,_lowerCamelCase=True ,_lowerCamelCase=True ,_lowerCamelCase=37 ,_lowerCamelCase="gelu" ,_lowerCamelCase=10 ,_lowerCamelCase=0.0_2 ,_lowerCamelCase=["stage2", "stage3", "stage4"] ,_lowerCamelCase=[2, 3, 4] ,_lowerCamelCase=None ,) -> str:
'''simple docstring'''
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = num_channels
__lowercase = num_stages
__lowercase = hidden_sizes
__lowercase = depths
__lowercase = is_training
__lowercase = use_labels
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = num_labels
__lowercase = initializer_range
__lowercase = out_features
__lowercase = out_indices
__lowercase = scope
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] ,self.num_labels )
__lowercase = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
return ConvNextVaConfig(
num_channels=self.num_channels ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_stages=self.num_stages ,hidden_act=self.hidden_act ,is_decoder=_lowerCamelCase ,initializer_range=self.initializer_range ,out_features=self.out_features ,out_indices=self.out_indices ,num_labels=self.num_labels ,)
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Tuple:
'''simple docstring'''
__lowercase = ConvNextVaModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__lowercase = model(_lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> str:
'''simple docstring'''
__lowercase = ConvNextVaForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__lowercase = model(_lowerCamelCase ,labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
__lowercase = ConvNextVaBackbone(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__lowercase = model(_lowerCamelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__lowercase = None
__lowercase = ConvNextVaBackbone(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__lowercase = model(_lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {'''pixel_values''': pixel_values}
return config, inputs_dict
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {'''pixel_values''': pixel_values, '''labels''': labels}
return config, inputs_dict
@require_torch
class __lowercase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
a : Optional[Any] = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
a : Union[str, Any] = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
a : List[str] = False
a : List[str] = False
a : int = False
a : Union[str, Any] = False
a : int = False
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = ConvNextVaModelTester(self )
__lowercase = ConfigTester(self ,config_class=_lowerCamelCase ,has_text_modality=_lowerCamelCase ,hidden_size=37 )
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
return
@unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' )
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
pass
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_with_labels()
__lowercase = True
if model_class.__name__ in [
*get_values(_lowerCamelCase ),
*get_values(_lowerCamelCase ),
]:
continue
__lowercase = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
__lowercase = self._prepare_for_class(_lowerCamelCase ,_lowerCamelCase ,return_labels=_lowerCamelCase )
__lowercase = model(**_lowerCamelCase ).loss
loss.backward()
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_with_labels()
__lowercase = False
__lowercase = True
if (
model_class.__name__
in [*get_values(_lowerCamelCase ), *get_values(_lowerCamelCase )]
or not model_class.supports_gradient_checkpointing
):
continue
__lowercase = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.gradient_checkpointing_enable()
model.train()
__lowercase = self._prepare_for_class(_lowerCamelCase ,_lowerCamelCase ,return_labels=_lowerCamelCase )
__lowercase = model(**_lowerCamelCase ).loss
loss.backward()
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCamelCase )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
def check_hidden_states_output(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ):
__lowercase = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(_lowerCamelCase ,_lowerCamelCase ) )
__lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowercase = self.model_tester.num_stages
self.assertEqual(len(_lowerCamelCase ) ,expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
@slow
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = ConvNextVaModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def _lowerCAmelCase ( ):
__lowercase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None
@slow
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(_lowerCamelCase )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = preprocessor(images=_lowerCamelCase ,return_tensors='''pt''' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
__lowercase = model(**_lowerCamelCase )
# verify the logits
__lowercase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,_lowerCamelCase )
__lowercase = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_lowerCamelCase ,atol=1E-4 ) )
| 217
|
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase=sys.maxsize ) -> Optional[Any]:
'''simple docstring'''
__lowercase = '''bilinear'''
__lowercase = max_size
__lowercase = short_edge_length
def __call__(self ,_lowerCamelCase ) -> str:
'''simple docstring'''
__lowercase = []
for img in imgs:
__lowercase , __lowercase = img.shape[:2]
# later: provide list and randomly choose index for resize
__lowercase = np.random.randint(self.short_edge_length[0] ,self.short_edge_length[1] + 1 )
if size == 0:
return img
__lowercase = size * 1.0 / min(_lowerCamelCase ,_lowerCamelCase )
if h < w:
__lowercase , __lowercase = size, scale * w
else:
__lowercase , __lowercase = scale * h, size
if max(_lowerCamelCase ,_lowerCamelCase ) > self.max_size:
__lowercase = self.max_size * 1.0 / max(_lowerCamelCase ,_lowerCamelCase )
__lowercase = newh * scale
__lowercase = neww * scale
__lowercase = int(neww + 0.5 )
__lowercase = int(newh + 0.5 )
            if img.dtype == np.uint8:
__lowercase = Image.fromarray(_lowerCamelCase )
__lowercase = pil_image.resize((neww, newh) ,PILImageResampling.BILINEAR )
__lowercase = np.asarray(_lowerCamelCase )
else:
                __lowercase = img.permute(2 ,0 ,1 ).unsqueeze(0 )  # hwc -> nchw
__lowercase = nn.functional.interpolate(
_lowerCamelCase ,(newh, neww) ,mode=self.interp_method ,align_corners=_lowerCamelCase ).squeeze(0 )
img_augs.append(_lowerCamelCase )
return img_augs
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] ,cfg.INPUT.MAX_SIZE_TEST )
__lowercase = cfg.INPUT.FORMAT
__lowercase = cfg.SIZE_DIVISIBILITY
__lowercase = cfg.PAD_VALUE
__lowercase = cfg.INPUT.MAX_SIZE_TEST
__lowercase = cfg.MODEL.DEVICE
__lowercase = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) ,1 ,1 )
__lowercase = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) ,1 ,1 )
__lowercase = lambda _lowerCamelCase : (x - self.pixel_mean) / self.pixel_std
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
__lowercase = tuple(max(_lowerCamelCase ) for s in zip(*[img.shape for img in images] ) )
__lowercase = [im.shape[-2:] for im in images]
__lowercase = [
nn.functional.pad(
_lowerCamelCase ,[0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] ,value=self.pad_value ,)
for size, im in zip(_lowerCamelCase ,_lowerCamelCase )
]
return torch.stack(_lowerCamelCase ), torch.tensor(_lowerCamelCase )
def __call__(self ,_lowerCamelCase ,_lowerCamelCase=False ) -> Tuple:
'''simple docstring'''
with torch.no_grad():
if not isinstance(_lowerCamelCase ,_lowerCamelCase ):
__lowercase = [images]
if single_image:
assert len(_lowerCamelCase ) == 1
for i in range(len(_lowerCamelCase ) ):
if isinstance(images[i] ,torch.Tensor ):
images.insert(_lowerCamelCase ,images.pop(_lowerCamelCase ).to(self.device ).float() )
elif not isinstance(images[i] ,torch.Tensor ):
images.insert(
_lowerCamelCase ,torch.as_tensor(img_tensorize(images.pop(_lowerCamelCase ) ,input_format=self.input_format ) )
.to(self.device )
.float() ,)
# resize smallest edge
__lowercase = torch.tensor([im.shape[:2] for im in images] )
__lowercase = self.aug(_lowerCamelCase )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
__lowercase = [self.normalizer(_lowerCamelCase ) for x in images]
# now pad them to do the following operations
__lowercase , __lowercase = self.pad(_lowerCamelCase )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
__lowercase = torch.true_divide(_lowerCamelCase ,_lowerCamelCase )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def _lowerCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Tuple ):
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def _lowerCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : Tuple[int, int] ):
assert torch.isfinite(lowerCamelCase_ ).all(), "Box tensor contains infinite or NaN!"
__lowercase , __lowercase = box_size
tensor[:, 0].clamp_(min=0 , max=lowerCamelCase_ )
tensor[:, 1].clamp_(min=0 , max=lowerCamelCase_ )
tensor[:, 2].clamp_(min=0 , max=lowerCamelCase_ )
tensor[:, 3].clamp_(min=0 , max=lowerCamelCase_ )
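# Editor's demo of the two box helpers above (their names are obfuscated: the
# first rescales box coordinates by per-image y/x factors, the second clamps
# boxes into an (h, w) canvas). A standalone equivalent of the rescaling step:
import torch

demo_boxes = torch.tensor([[10.0, 20.0, 300.0, 400.0]])  # x0, y0, x1, y1
demo_scale_yx = torch.tensor([[0.5, 0.25]])              # (scale_y, scale_x)
demo_boxes[:, 0::2] *= demo_scale_yx[:, 1]               # x coords * scale_x
demo_boxes[:, 1::2] *= demo_scale_yx[:, 0]               # y coords * scale_y
print(demo_boxes)  # tensor([[  2.5000,  10.0000,  75.0000, 200.0000]])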
| 217
| 1
|
"""simple docstring"""
import numpy
class _UpperCAmelCase :
def __init__( self : List[Any] , A : numpy.ndarray , A : numpy.ndarray ) -> None:
lowercase_ : Union[str, Any] = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
lowercase_ : Optional[int] = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
lowercase_ : Optional[Any] = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
lowercase_ : Optional[Any] = numpy.random.rand(3 , 1 )
# Real output values provided.
lowercase_ : str = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
lowercase_ : Dict = numpy.zeros(output_array.shape )
def A ( self : Union[str, Any] ) -> numpy.ndarray:
lowercase_ : List[Any] = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
lowercase_ : str = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
lowercase_ : Dict = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def A ( self : Optional[int] ) -> None:
lowercase_ : Any = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
lowercase_ : int = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
lowercase_ : Dict = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def A ( self : str , A : numpy.ndarray , A : int , A : bool ) -> None:
for iteration in range(1 , iterations + 1 ):
lowercase_ : int = self.feedforward()
self.back_propagation()
if give_loss:
lowercase_ : Optional[Any] = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F'''Iteration {iteration} Loss: {loss}''' )
def A ( self : Optional[Any] , A : numpy.ndarray ) -> int:
lowercase_ : Optional[int] = input_arr
lowercase_ : int = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
lowercase_ : Dict = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
lowercase_ : int = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def lowercase ( __snake_case : numpy.ndarray ):
return 1 / (1 + numpy.exp(-value ))
def lowercase ( __snake_case : numpy.ndarray ):
return (value) * (1 - (value))
def lowercase ( ):
lowercase_ : List[str] = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
        ) , dtype=numpy.float64 , )
    # True output values for the given input values.
    lowercase_ : int = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.float64 )
# Calling neural network class.
lowercase_ : str = TwoHiddenLayerNeuralNetwork(
input_array=__snake_case , output_array=__snake_case )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=__snake_case , iterations=1_0 , give_loss=__snake_case )
    return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.float64 ) )
if __name__ == "__main__":
example()
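# Editor's sanity check: sigmoid_derivative above expects the *activation*
# value s = sigmoid(x), not x itself, and returns s * (1 - s), which equals
# d/dx sigmoid(x). A quick finite-difference verification:
import numpy

demo_x, demo_eps = 0.3, 1e-6
demo_s = 1 / (1 + numpy.exp(-demo_x))
demo_numeric = (1 / (1 + numpy.exp(-(demo_x + demo_eps))) - demo_s) / demo_eps
assert abs(demo_s * (1 - demo_s) - demo_numeric) < 1e-5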
| 33
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : int = logging.get_logger(__name__)
lowerCAmelCase__ : str = {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/config.json',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = "xglm"
snake_case__ = ["past_key_values"]
snake_case__ = {
"num_attention_heads": "attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "num_layers",
}
def __init__( self : Any ,lowerCamelCase__ : Any=256_008 ,lowerCamelCase__ : Optional[Any]=2_048 ,lowerCamelCase__ : List[str]=1_024 ,lowerCamelCase__ : List[str]=4_096 ,lowerCamelCase__ : Tuple=24 ,lowerCamelCase__ : Optional[int]=16 ,lowerCamelCase__ : int="gelu" ,lowerCamelCase__ : Dict=0.1 ,lowerCamelCase__ : int=0.1 ,lowerCamelCase__ : List[Any]=0.0 ,lowerCamelCase__ : List[str]=0.0 ,lowerCamelCase__ : Optional[Any]=0.0_2 ,lowerCamelCase__ : List[str]=True ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : str=2 ,lowerCamelCase__ : Dict=1 ,lowerCamelCase__ : Optional[int]=0 ,lowerCamelCase__ : Tuple=2 ,**lowerCamelCase__ : List[Any] ,):
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = d_model
UpperCAmelCase__ = ffn_dim
UpperCAmelCase__ = num_layers
UpperCAmelCase__ = attention_heads
UpperCAmelCase__ = activation_function
UpperCAmelCase__ = dropout
UpperCAmelCase__ = attention_dropout
UpperCAmelCase__ = activation_dropout
UpperCAmelCase__ = layerdrop
UpperCAmelCase__ = init_std
UpperCAmelCase__ = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase__ = use_cache
super().__init__(
pad_token_id=lowerCamelCase__ ,bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,decoder_start_token_id=lowerCamelCase__ ,**lowerCamelCase__ ,)
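# Editor's usage sketch: assuming the class above is transformers' XGLMConfig
# (its model_type is "xglm"), a small test-sized config is built like this;
# the attribute_map makes hidden_size an alias for d_model.
from transformers import XGLMConfig

demo_config = XGLMConfig(vocab_size=1000, d_model=128, num_layers=2, attention_heads=4)
assert demo_config.hidden_size == 128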
| 98
| 0
|
'''simple docstring'''
import sys
__lowercase : Union[str, Any] = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
__a : List[str] = 1
for digit in s:
product *= int(_SCREAMING_SNAKE_CASE )
return product
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str = N ):
__a : Optional[int] = -sys.maxsize - 1
__a : Optional[Any] = n[:13]
__a : int = 13
while cur_index < len(_SCREAMING_SNAKE_CASE ) - 13:
if int(n[cur_index] ) >= int(substr[0] ):
__a : List[Any] = substr[1:] + n[cur_index]
cur_index += 1
else:
__a : Dict = max(_SCREAMING_SNAKE_CASE , str_eval(_SCREAMING_SNAKE_CASE ) )
__a : Optional[Any] = n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
if __name__ == "__main__":
print(f'''{solution() = }''')
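# Editor's demo (assuming the digit-product helper keeps the name `str_eval`
# used inside solution's loop): it multiplies the digits of a string, so any
# '0' inside a 13-digit window zeroes that window's product.
assert str_eval("234") == 24   # 2 * 3 * 4
assert str_eval("1024") == 0   # the '0' zeroes the product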
| 362
|
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ):
if is_torch_version('<' , '2.0.0' ) or not hasattr(_SCREAMING_SNAKE_CASE , '_dynamo' ):
return False
return isinstance(_SCREAMING_SNAKE_CASE , torch._dynamo.eval_frame.OptimizedModule )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : bool = True ):
__a : int = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
__a : Any = is_compiled_module(_SCREAMING_SNAKE_CASE )
if is_compiled:
__a : List[Any] = model
__a : Union[str, Any] = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__a : Union[str, Any] = model.module
if not keep_fpaa_wrapper:
__a : Optional[Any] = getattr(_SCREAMING_SNAKE_CASE , 'forward' )
__a : str = model.__dict__.pop('_original_forward' , _SCREAMING_SNAKE_CASE )
if original_forward is not None:
while hasattr(_SCREAMING_SNAKE_CASE , '__wrapped__' ):
__a : Any = forward.__wrapped__
if forward == original_forward:
break
__a : str = forward
if getattr(_SCREAMING_SNAKE_CASE , '_converted_to_transformer_engine' , _SCREAMING_SNAKE_CASE ):
convert_model(_SCREAMING_SNAKE_CASE , to_transformer_engine=_SCREAMING_SNAKE_CASE )
if is_compiled:
__a : List[str] = model
__a : Optional[int] = compiled_model
return model
def lowerCamelCase ():
PartialState().wait_for_everyone()
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Tuple ):
if PartialState().distributed_type == DistributedType.TPU:
xm.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif PartialState().local_process_index == 0:
torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@contextmanager
def lowerCamelCase (**_SCREAMING_SNAKE_CASE : Tuple ):
for key, value in kwargs.items():
__a : Optional[int] = str(_SCREAMING_SNAKE_CASE )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ):
if not hasattr(_SCREAMING_SNAKE_CASE , '__qualname__' ) and not hasattr(_SCREAMING_SNAKE_CASE , '__name__' ):
__a : List[Any] = getattr(_SCREAMING_SNAKE_CASE , '__class__' , _SCREAMING_SNAKE_CASE )
if hasattr(_SCREAMING_SNAKE_CASE , '__qualname__' ):
return obj.__qualname__
if hasattr(_SCREAMING_SNAKE_CASE , '__name__' ):
return obj.__name__
return str(_SCREAMING_SNAKE_CASE )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] ):
for key, value in source.items():
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__a : int = destination.setdefault(_SCREAMING_SNAKE_CASE , {} )
merge_dicts(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
__a : Tuple = value
return destination
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int = None ):
if port is None:
__a : List[str] = 29_500
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
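# Editor's demo (assuming the dict helper keeps the name `merge_dicts` used in
# its own recursive call): nested keys are merged into the destination rather
# than overwritten wholesale, and the destination dict is returned.
demo_base = {"optimizer": {"lr": 1e-3}, "seed": 0}
demo_override = {"optimizer": {"weight_decay": 0.01}}
assert merge_dicts(demo_override, demo_base) == {
    "optimizer": {"lr": 1e-3, "weight_decay": 0.01},
    "seed": 0,
}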
| 294
| 0
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A__ : int = logging.get_logger(__name__)
A__ : Optional[int] = {
"SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class __snake_case ( __lowerCamelCase ):
_a = '''deformable_detr'''
_a = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self : List[str] , A_ : Optional[int]=True , A_ : List[Any]=None , A_ : List[str]=3 , A_ : str=3_0_0 , A_ : List[Any]=1_0_2_4 , A_ : int=6 , A_ : Union[str, Any]=1_0_2_4 , A_ : str=8 , A_ : Union[str, Any]=6 , A_ : Union[str, Any]=1_0_2_4 , A_ : Any=8 , A_ : str=0.0 , A_ : Any=True , A_ : Dict="relu" , A_ : List[str]=2_5_6 , A_ : Any=0.1 , A_ : Optional[Any]=0.0 , A_ : Optional[Any]=0.0 , A_ : str=0.02 , A_ : Tuple=1.0 , A_ : List[Any]=True , A_ : Any=False , A_ : int="sine" , A_ : Optional[Any]="resnet50" , A_ : Tuple=True , A_ : Dict=False , A_ : Tuple=4 , A_ : str=4 , A_ : int=4 , A_ : Union[str, Any]=False , A_ : Union[str, Any]=3_0_0 , A_ : Optional[int]=False , A_ : List[str]=1 , A_ : str=5 , A_ : str=2 , A_ : Optional[Any]=1 , A_ : Any=1 , A_ : List[str]=5 , A_ : List[str]=2 , A_ : Any=0.1 , A_ : Union[str, Any]=0.25 , A_ : List[Any]=False , **A_ : List[str] , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''')
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
lowerCAmelCase_ : List[Any] = CONFIG_MAPPING["""resnet"""](out_features=['''stage4'''])
elif isinstance(A_ , A_):
lowerCAmelCase_ : int = backbone_config.get('''model_type''')
lowerCAmelCase_ : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase_ : str = config_class.from_dict(A_)
lowerCAmelCase_ : Optional[int] = use_timm_backbone
lowerCAmelCase_ : Optional[Any] = backbone_config
lowerCAmelCase_ : Tuple = num_channels
lowerCAmelCase_ : Dict = num_queries
lowerCAmelCase_ : Optional[int] = max_position_embeddings
lowerCAmelCase_ : Any = d_model
lowerCAmelCase_ : Dict = encoder_ffn_dim
lowerCAmelCase_ : Any = encoder_layers
lowerCAmelCase_ : str = encoder_attention_heads
lowerCAmelCase_ : Tuple = decoder_ffn_dim
lowerCAmelCase_ : Optional[Any] = decoder_layers
lowerCAmelCase_ : int = decoder_attention_heads
lowerCAmelCase_ : Tuple = dropout
lowerCAmelCase_ : Tuple = attention_dropout
lowerCAmelCase_ : List[Any] = activation_dropout
lowerCAmelCase_ : Any = activation_function
lowerCAmelCase_ : Dict = init_std
lowerCAmelCase_ : Optional[int] = init_xavier_std
lowerCAmelCase_ : int = encoder_layerdrop
lowerCAmelCase_ : str = auxiliary_loss
lowerCAmelCase_ : List[Any] = position_embedding_type
lowerCAmelCase_ : List[Any] = backbone
lowerCAmelCase_ : Optional[Any] = use_pretrained_backbone
lowerCAmelCase_ : List[str] = dilation
# deformable attributes
lowerCAmelCase_ : Optional[Any] = num_feature_levels
lowerCAmelCase_ : int = encoder_n_points
lowerCAmelCase_ : Any = decoder_n_points
lowerCAmelCase_ : Optional[Any] = two_stage
lowerCAmelCase_ : Dict = two_stage_num_proposals
lowerCAmelCase_ : Any = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''')
# Hungarian matcher
lowerCAmelCase_ : int = class_cost
lowerCAmelCase_ : Any = bbox_cost
lowerCAmelCase_ : Any = giou_cost
# Loss coefficients
lowerCAmelCase_ : List[Any] = mask_loss_coefficient
lowerCAmelCase_ : Any = dice_loss_coefficient
lowerCAmelCase_ : List[Any] = bbox_loss_coefficient
lowerCAmelCase_ : Union[str, Any] = giou_loss_coefficient
lowerCAmelCase_ : Optional[Any] = eos_coefficient
lowerCAmelCase_ : Optional[int] = focal_alpha
lowerCAmelCase_ : Optional[int] = disable_custom_kernels
super().__init__(is_encoder_decoder=A_ , **A_)
@property
def UpperCAmelCase__ ( self : str):
return self.encoder_attention_heads
@property
def UpperCAmelCase__ ( self : List[Any]):
return self.d_model
def UpperCAmelCase__ ( self : Optional[int]):
lowerCAmelCase_ : Dict = copy.deepcopy(self.__dict__)
if self.backbone_config is not None:
lowerCAmelCase_ : List[Any] = self.backbone_config.to_dict()
lowerCAmelCase_ : List[Any] = self.__class__.model_type
return output
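# Editor's usage sketch: assuming the class above is transformers'
# DeformableDetrConfig, the custom serializer inlines the nested backbone
# config, so a to_dict() round-trip yields plain dicts:
from transformers import DeformableDetrConfig

demo_cfg = DeformableDetrConfig(use_timm_backbone=False)
demo_dict = demo_cfg.to_dict()
assert demo_dict["model_type"] == "deformable_detr"
assert isinstance(demo_dict["backbone_config"], dict)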
| 103
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase__ ( self : Any , A : Union[str, Any] ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
__snake_case: Dict = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(A )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: List[Any] = """sshleifer/tiny-gpt2"""
__snake_case: Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=A , multi_process=A , )
__snake_case: Dict = TensorFlowBenchmark(A )
__snake_case: List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Optional[int] = """sgugger/tiny-distilbert-classification"""
__snake_case: str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , only_pretrain_model=A , )
__snake_case: Any = TensorFlowBenchmark(A )
__snake_case: List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : int ):
__snake_case: Dict = """sshleifer/tiny-gpt2"""
__snake_case: Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
__snake_case: int = TensorFlowBenchmark(A )
__snake_case: str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: List[Any] = """sshleifer/tiny-gpt2"""
__snake_case: int = AutoConfig.from_pretrained(A )
__snake_case: Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=A , multi_process=A , )
__snake_case: Union[str, Any] = TensorFlowBenchmark(A , [config] )
__snake_case: Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Optional[int] = """sshleifer/tiny-gpt2"""
__snake_case: Tuple = AutoConfig.from_pretrained(A )
__snake_case: Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
__snake_case: str = TensorFlowBenchmark(A , [config] )
__snake_case: str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Tuple = """sshleifer/tiny-gpt2"""
__snake_case: str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
__snake_case: Tuple = TensorFlowBenchmark(A )
__snake_case: Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCAmelCase__ ( self : int ):
__snake_case: Dict = """sshleifer/tiny-gpt2"""
__snake_case: Union[str, Any] = AutoConfig.from_pretrained(A )
__snake_case: Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
__snake_case: List[str] = TensorFlowBenchmark(A , [config] )
__snake_case: Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: List[Any] = """patrickvonplaten/t5-tiny-random"""
__snake_case: List[str] = AutoConfig.from_pretrained(A )
__snake_case: List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
__snake_case: Optional[int] = TensorFlowBenchmark(A , configs=[config] )
__snake_case: Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , """Cannot do xla on CPU.""" )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = """sshleifer/tiny-gpt2"""
__snake_case: List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , use_xla=A , multi_process=A , )
__snake_case: Union[str, Any] = TensorFlowBenchmark(A )
__snake_case: List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: List[str] = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case: int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=A , save_to_csv=A , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(A , """inf_time.csv""" ) , inference_memory_csv_file=os.path.join(A , """inf_mem.csv""" ) , env_info_csv_file=os.path.join(A , """env.csv""" ) , multi_process=A , )
__snake_case: Tuple = TensorFlowBenchmark(A )
benchmark.run()
self.assertTrue(Path(os.path.join(A , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A , """env.csv""" ) ).exists() )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Any = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(A : Union[str, Any] ):
self.assertTrue(hasattr(A , """sequential""" ) )
self.assertTrue(hasattr(A , """cumulative""" ) )
self.assertTrue(hasattr(A , """current""" ) )
self.assertTrue(hasattr(A , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case: Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=A , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(A , """log.txt""" ) , log_print=A , trace_memory_line_by_line=A , eager_mode=A , multi_process=A , )
__snake_case: Dict = TensorFlowBenchmark(A )
__snake_case: List[str] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(A , """log.txt""" ) ).exists() )
| 111
| 0
|
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_UpperCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase ( __A ):
'''simple docstring'''
def __init__( self , *lowercase , **lowercase ):
"""simple docstring"""
warnings.warn(
'The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DonutImageProcessor instead.' , lowercase , )
super().__init__(*lowercase , **lowercase )
| 192
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCAmelCase = {
"""configuration_instructblip""": [
"""INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InstructBlipConfig""",
"""InstructBlipQFormerConfig""",
"""InstructBlipVisionConfig""",
],
"""processing_instructblip""": ["""InstructBlipProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InstructBlipQFormerModel""",
"""InstructBlipPreTrainedModel""",
"""InstructBlipForConditionalGeneration""",
"""InstructBlipVisionModel""",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 192
| 1
|
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
_UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
    def __init__(self, controlnets):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample,
        timestep,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale,
        class_labels=None,
        timestep_cond=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        guess_mode=False,
        return_dict=True,
    ):
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )
            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(self, save_directory, is_main_process=True, save_function=None, safe_serialization=False, variant=None):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )
            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path, **kwargs):
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)
            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"
        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")
        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )
        return cls(controlnets)
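# Usage sketch (paths are illustrative): after `multi.save_pretrained("./ctrl")`
# the sub-models land in ./ctrl, ./ctrl_1, ./ctrl_2, ...; they can be restored
# with `MultiControlNetModel.from_pretrained("./ctrl")`, which keeps loading
# suffixed directories until one is missing.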
| 220
|
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester:
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")
        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)
    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)
        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)
    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)
    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                wrong_values.append((key, getattr(config, key), value))
        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")
    def run_common_tests(self):
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
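# Typical wiring inside a model test suite (illustrative; the extra kwargs become
# `inputs_dict` and are forwarded to the config class under test):
#     self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
#     self.config_tester.run_common_tests()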
| 220
| 1
|
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1_002)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)
    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18_536, 2_260, 101]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2_521,
452,
358,
1_357,
387,
7_751,
3_536,
112,
985,
456,
126,
865,
938,
5_400,
5_734,
458,
1_368,
467,
786,
2_462,
5_246,
1_159,
633,
865,
4_519,
457,
582,
852,
2_557,
427,
916,
508,
405,
34_324,
497,
391,
408,
11_342,
1_244,
385,
100,
938,
985,
456,
574,
362,
12_597,
3_200,
3_129,
1_172,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )
        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
def UpperCamelCase__ ( self) -> Dict:
# fmt: off
__UpperCamelCase :List[str] = {'''input_ids''': [[39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114], [448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowercase , model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''' , revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''' , )
| 353
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FlaubertConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, bos_token_id=self.bos_token_id,
        )
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_flaubert_for_token_classification(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_flaubert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''')
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 105
| 0
|
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)
            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # 3, 0, 1)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)
        return img_augs
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)
    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
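# Both helpers assume boxes in (x0, y0, x1, y1) pixel coordinates: _scale_box
# multiplies the x-coordinates by the width ratio (scale_yx[:, 1]) and the
# y-coordinates by the height ratio (scale_yx[:, 0]); _clip_box clamps every
# corner into the (height, width) rectangle in place.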
| 217
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers
        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers
        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs,
        )
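# Worked example of the sparse-step arithmetic above: with the defaults
# num_layers=12 and num_sparse_encoder_layers=3, encoder_sparse_step = 12 // 3 = 4,
# i.e. every fourth encoder block is a sparse (mixture-of-experts) layer.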
| 217
| 1
|
'''simple docstring'''
def base16_encode(data: bytes) -> str:
    """Encode the given bytes as an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 string back into bytes."""
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid: Data does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
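# Example round-trip (a quick sanity check, not part of the original module):
#     base16_encode(b"Hello World!")             -> "48656C6C6F20576F726C6421"
#     base16_decode("48656C6C6F20576F726C6421")  -> b"Hello World!"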
if __name__ == "__main__":
import doctest
doctest.testmod()
| 6
|
'''simple docstring'''
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4, dice_number=9
    )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6, dice_number=6
    )

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)
    return rounded_peter_win_probability
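# Sanity note: for each of Peter's totals t (9..36) the loop counts the games in
# which Colin's total is strictly smaller than t; dividing by 4**9 * 6**6 (all
# equally likely outcome pairs) yields Peter's win probability, ~0.5731441.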
if __name__ == "__main__":
print(F"""{solution() = }""")
| 6
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=50_400,
        n_positions=2_048,
        n_embd=4_096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
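# Sketch of typical use with the transformers ONNX export (the checkpoint name
# is illustrative; `tokenizer` must be created separately):
#     config = GPTJConfig.from_pretrained("EleutherAI/gpt-j-6B")
#     onnx_config = GPTJOnnxConfig(config, task="default", use_past=True)
#     dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)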
| 139
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = torch.device('cpu')
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.17_03e00, 2.11_07e00, -2.08_11e00, 8.86_85e-01, 2.43_60e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.96_36e-01, 2.34_78e-01, -1.69_63e00, -1.73_81e00, -8.63_37e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.27_68e-01, -4.74_29e-01, -1.08_97e00, -1.02_48e00, 3.55_23e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.53_30e-01, 2.42_11e-01, -6.01_85e-01, -8.27_89e-01, -6.04_46e-02] )
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1_000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1_000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
_snake_case = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
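# Example invocation (the script filename and checkpoint path are illustrative):
#     python convert_swiftformer_original_to_hf.py --swiftformer_name swiftformer_xs \
#         --pytorch_dump_folder_path ./converted_outputs/ --original_ckpt ./swiftformer_xs.pth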
| 294
| 0
|
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b):
    """Gives the euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))
def classifier(train_data, train_target, classes, point, k=5):
    """Classifies the point using the KNN algorithm."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
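# Note on the demo call: with measurements [4.4, 3.1, 1.3, 1.4], the short petal
# length makes the 5 nearest neighbours almost always Iris setosa, so the printed
# class name is typically "setosa" (the train/test split itself is random).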
| 371
|
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
| 60
| 0
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
A_ : Tuple = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']}
if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
import sys
A_ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
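# Note: with _LazyModule, `Wav2Vec2ProcessorWithLM` is only imported on first
# attribute access at runtime; the TYPE_CHECKING branch exists purely so static
# type checkers see the real symbol.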
| 192
|
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"""Save PyTorch model to {pytorch_weights_dump_path}""")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"""Save configuration file to {pytorch_config_dump_path}""")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
A_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
A_ : str = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
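# Example invocation (the script filename and paths are placeholders):
#     python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#         --gpt2_checkpoint_path ./model.ckpt --pytorch_dump_folder_path ./gpt2-pt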
| 192
| 1
|
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **rouge_kwargs):
    """Extra kwargs are passed through to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **rouge_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
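# fire.Fire turns the function signature into a CLI; a typical call (file names
# are placeholders) looks like:
#     python rouge_cli.py preds.txt refs.txt --save_path metrics.json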
| 371
|
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 324
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_reformer'''] = ['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_reformer_fast'''] = ['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_reformer'''] = [
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
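
# Note: _LazyModule defers the submodule imports declared in _import_structure, so e.g.
# `from transformers.models.reformer import ReformerModel` only loads the torch-backed
# modeling code on first attribute access.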
| 250
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ) -> None:
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create an ImageNet label -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.")
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])

            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input).sample

            # perform classifier-free guidance: eps = eps_uncond + scale * (eps_cond - eps_uncond)
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma: the model predicts noise plus a variance; drop the variance channels
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample
        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
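
# A minimal usage sketch (assuming the public "facebook/DiT-XL-2-256" checkpoint and a CUDA device):
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256").to("cuda")
#   class_ids = pipe.get_label_ids(["white shark", "umbrella"])
#   images = pipe(class_labels=class_ids, num_inference_steps=25).images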
| 105
| 0
|
"""simple docstring"""
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios, **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt")
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
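
# A minimal usage sketch (assuming the "laion/clap-htsat-unfused" checkpoint and a 1-D numpy waveform):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   classifier(waveform, candidate_labels=["dog barking", "vacuum cleaner"])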
| 370
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
| 126
| 0
|
def base16_encode(data: bytes) -> str:
    # Turn each byte into its two-digit uppercase hex representation.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:
Data does not have an even number of hex digits.""")
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            """Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.""")
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
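    # Round-trip example (using the functions above):
    #   base16_encode(b"Hello World!")             -> "48656C6C6F20576F726C6421"
    #   base16_decode("48656C6C6F20576F726C6421")  -> b"Hello World!"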
| 6
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json',
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    model_type = 'levit'

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


# ONNX-export configuration: declares the graph inputs and the numeric tolerance
# used to validate the exported model against the PyTorch reference.
class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
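
# A minimal usage sketch (assuming the default hyper-parameters above):
#   config = LevitConfig()
#   onnx_config = LevitOnnxConfig(config)
#   list(onnx_config.inputs)  # -> ['pixel_values']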
| 6
| 1
|
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."})
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome.")

    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}")

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
                results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )
        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)
        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
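    # Usage sketch (hypothetical paths; requires a `tasks.py` defining e.g. an `NER` TokenClassificationTask):
    #   python run_ner.py --model_name_or_path bert-base-cased --task_type NER \
    #     --data_dir ./conll2003 --labels ./conll2003/labels.txt --output_dir ./ner-out --do_train --do_eval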
| 124
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_encoder_decoder'] = ['EncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_encoder_decoder'] = ['TFEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_encoder_decoder'] = ['FlaxEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 124
| 1
|
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel:
            # g(x, y) = exp(-(x'^2 + gamma^2 * y'^2) / (2 * sigma^2)) * cos(2 * pi * x' / lambd + psi)
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread('../image_data/lena.jpg')
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow('Original', gray)
    imshow('Gabor filter with 11x11 mask and 6 directions', out)
    waitKey(0)
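    # Note: each kernel responds most strongly to edges oriented at `theta` degrees;
    # summing the six filtered responses above approximates an orientation-invariant edge map.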
| 333
|
"""simple docstring"""
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48_145_466, 0.4_578_275, 0.40_821_073],
        image_std: Optional[Union[float, List[float]]] = [0.26_862_954, 0.26_130_258, 0.27_577_711],
        do_pad: bool = True,
        batch_size: Optional[int] = 7,
        min_resolution: int = 30,
        max_resolution: int = 400,
        num_channels: int = 3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {'''shortest_edge''': 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            size = self.size['''shortest_edge''']
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1_3_3_3 / 8_0_0) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, '''image_mean'''))
        self.assertTrue(hasattr(image_processing, '''image_std'''))
        self.assertTrue(hasattr(image_processing, '''do_normalize'''))
        self.assertTrue(hasattr(image_processing, '''do_resize'''))
        self.assertTrue(hasattr(image_processing, '''size'''))
        self.assertTrue(hasattr(image_processing, '''size_divisor'''))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
| 0
|
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer


TOKENIZER_CHECKPOINTS = ['''gpt2''']
TINY_MODEL_CHECKPOINT = '''gpt2'''

if is_tf_available():

    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name='''text'''),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized['''input_ids'''].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)['''logits''']
            return outputs


@require_tf
@require_keras_nlp
class GPTTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            '''This is a straightforward English test sentence.''',
            '''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
            '''Now we\'re going to add some Chinese: 一 二 三 一二三''',
            '''And some much more rare Chinese: 齉 堃 齉堃''',
            '''Je vais aussi écrire en français pour tester les accents''',
            '''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors='''tf''')
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / '''saved.model'''
                tf.saved_model.save(model, save_path, signatures={'''serving_default''': model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures['''serving_default'''](test_inputs)['''output_0''']
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)
                out_length = out['''input_ids'''].numpy().shape[1]
                assert out_length == max_length
| 278
|
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints

from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)


def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params


def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}

    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict


def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1_536, d_ff=3_968, num_attention_heads=24, num_hidden_layers=18)
        decoder_config = Pix2StructTextConfig(hidden_size=1_536, d_ff=3_968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa)

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        processor.image_processor.max_patches = 4_096
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Convert the VQA variant of the model.")
    args = parser.parse_args()

    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
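    # Usage sketch (hypothetical paths):
    #   python convert_pix2struct_original_pytorch_checkpoint_to_hf.py \
    #     --t5x_checkpoint_path /path/to/t5x/checkpoint --pytorch_dump_folder_path ./pix2struct-base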
| 278
| 1
|
"""simple docstring"""
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [f"This example is {label}" for label in labels],
            return_tensors="pt", padding="max_length")

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
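
# A minimal usage sketch (assuming the default facebook/bart-large-mnli checkpoint is available):
#   tool = TextClassificationTool()
#   tool("This movie was awesome!", labels=["positive", "negative"])  # expected to return "positive"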
| 289
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 25_00_04
RO_CODE = 25_00_20
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
"""simple docstring"""
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname )
    def test_full_tokenizer(self):
'''simple docstring'''
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize('''This is a test''')
        self.assertListEqual(tokens, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]])
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
"""simple docstring"""
    checkpoint_name = 'facebook/mbart-large-en-ro'
    src_text = [
        ' UN Chief Says There Is No Military Solution in Syria',
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    tgt_text = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
        'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
    ]
    expected_src_tokens = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE]
@classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang='en_XX', tgt_lang='ro_RO')
        cls.pad_token_id = 1
        return cls
def snake_case__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 250020 )
def snake_case__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )
def snake_case__ ( self : str ) -> List[Any]:
'''simple docstring'''
self.assertIn(lowerCAmelCase__ , self.tokenizer.all_special_ids )
_UpperCamelCase = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
_UpperCamelCase = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
_UpperCamelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase__ )
def snake_case__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , lowerCAmelCase__ )
_UpperCamelCase = 10
_UpperCamelCase = self.tokenizer(lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
def snake_case__ ( self : List[Any] ) -> int:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [250026, 250001] )
def snake_case__ ( self : int ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCAmelCase__ )
_UpperCamelCase = MBartTokenizer.from_pretrained(lowerCAmelCase__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase__ )
@require_torch
def snake_case__ ( self : Any ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors='''pt''' )
_UpperCamelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def snake_case__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
_UpperCamelCase = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
_UpperCamelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
_UpperCamelCase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def snake_case__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.tokenizer(self.src_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=3 , return_tensors='''pt''' )
_UpperCamelCase = self.tokenizer(
text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=10 , return_tensors='''pt''' )
_UpperCamelCase = targets['''input_ids''']
_UpperCamelCase = shift_tokens_right(lowerCAmelCase__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def snake_case__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
# A, test, EOS, en_XX
'''input_ids''': [[62, 3034, 2, 250004]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 250001,
} , )
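
# Illustration (added note, not from the original test file): for MBart, shift_tokens_right
# rotates the final language code of each label row to position 0, so labels shaped
# [tokens..., eos(=2), RO_CODE] become decoder inputs [RO_CODE, tokens..., eos(=2)],
# which is exactly what the fairseq-parity assertions above check.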
| 324
| 0
|
from __future__ import annotations


def prime_sieve(limit: int) -> list[int]:
    """Sieve of Eratosthenes: return every prime strictly below `limit`."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Project Euler 50: find the prime below `ceiling` that can be written as
    the sum of the most consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
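
# Quick sanity check (added sketch, not part of the original solution):
#   prime_sieve(10) == [2, 3, 5, 7]
#   solution(100) == 41, since 41 = 2 + 3 + 5 + 7 + 11 + 13 is the longest
#   run of consecutive primes summing to a prime below 100.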
if __name__ == "__main__":
print(F"{solution() = }")
| 33
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
    'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_roberta_fast'] = ['RobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roberta'] = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_roberta'] = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_roberta'] = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
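
# Usage note (added for illustration; not part of the original module): the lazy layout
# above keeps `import transformers.models.roberta` cheap, since _LazyModule only imports
# e.g. `modeling_roberta` (and with it torch) the first time an attribute such as
# `RobertaModel` is actually resolved.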
| 33
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
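
    # Note (added): prepare_config_and_inputs_for_common is the hook the shared ModelTesterMixin
    # tests call; for ViT the common inputs dict is just pixel_values (no input_ids or attention_mask).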
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224').to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        model = ViTModel.from_pretrained('facebook/dino-vits8').to(torch_device)

        image_processor = ViTImageProcessor.from_pretrained('facebook/dino-vits8', size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        # A small test to make sure that inference works in half precision without any problem.
        model = ViTModel.from_pretrained('facebook/dino-vits8', torch_dtype=torch.float16, device_map='auto')
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
| 83
|
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname: str) -> str:
    stem = fname.split(os.path.sep)[-1]
    return re.search(r'^(.*)_\d+\.jpg$', stem).groups()[0]
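
# Illustrative example (added; assumes Oxford-IIIT-Pet style file names such as "Abyssinian_12.jpg"):
#   extract_label("images/Abyssinian_12.jpg") == "Abyssinian"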
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert('RGB')
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with='all', project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    image_size = config['image_size']
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)

    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, 'isdigit'):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
    else:
        checkpointing_steps = None

    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split('.')[0]
        accelerator.init_trackers(run, config)

    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith('.jpg')]

    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}

    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )

    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = create_model('resnet50d', pretrained=True, num_classes=len(label_to_id))

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True

    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg['mean'])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg['std'])[None, :, None, None].to(accelerator.device)

    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)

    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))
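
    # Note (added): OneCycleLR owns the peak learning rate: it ramps up to `max_lr` and back
    # down over training, which is why the optimizer above is deliberately created at `lr / 25`
    # (matching the scheduler's default div_factor of 25).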
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"""Resumed from checkpoint: {args.resume_from_checkpoint}""")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]

        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace('epoch_', '')) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace('step_', ''))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)

    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch['image'] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch['label'])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"""step_{overall_step}"""
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch['image'] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch['label']))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}: {100 * eval_metric:.2f}""")
        if args.with_tracking:
            accelerator.log(
                {
                    'accuracy': 100 * eval_metric,
                    'train_loss': total_loss.item() / len(train_dataloader),
                    'epoch': epoch,
                }, step=overall_step, )
        if checkpointing_steps == "epoch":
            output_dir = f"""epoch_{epoch}"""
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument('--data_dir', required=True, help='The data folder on disk.')
    parser.add_argument('--fp16', action='store_true', help='If passed, will use FP16 training.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.', )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    parser.add_argument(
        '--checkpointing_steps', type=str, default=None, help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.', )
    parser.add_argument(
        '--output_dir', type=str, default='.', help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.', )
    parser.add_argument(
        '--resume_from_checkpoint', type=str, default=None, help='If the training should continue from a checkpoint folder.', )
    parser.add_argument(
        '--with_tracking', action='store_true', help='Whether to load in all available experiment trackers from the environment and use them for logging.', )
    parser.add_argument(
        '--project_dir', type=str, default='logs', help='Location on where to store experiment tracking logs and relevant project information', )
    args = parser.parse_args()
    config = {'lr': 3e-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 224}
    training_function(config, args)
if __name__ == "__main__":
main()
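
# Example invocation (added sketch; flags as defined by the parser above):
#   accelerate launch cv_example.py --data_dir ./images --mixed_precision fp16 \
#       --checkpointing_steps epoch --output_dir ./checkpoints --with_tracking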
| 126
| 0
|
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )
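
    # Note (added): SentencePiece marks word-initial pieces with "▁" (U+2581), which is why
    # "this is a test" round-trips as ["▁this", "▁is", "▁a", "▁test"] in the assertions above.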
    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
@slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase_ = {'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''input_ids''': [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase_, model_name="albert-base-v2", revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e"
        )
| 7
|
from maths.prime_factors import prime_factors
def liouville_lambda(number: int) -> int:
    """
    Liouville function lambda(n) = (-1)**Omega(n): returns -1 if `number` has an odd
    number of prime factors (counted with multiplicity), and +1 otherwise.
    """
    if not isinstance(number, int):
        msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg)
    if number < 1:
        raise ValueError('''Input must be a positive integer''')
    return -1 if len(prime_factors(number)) % 2 else 1
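
# Worked examples (added): liouville_lambda(10) == 1, since 10 = 2 * 5 has an even number
# of prime factors counted with multiplicity; liouville_lambda(11) == -1, since 11 is prime.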
if __name__ == "__main__":
import doctest
doctest.testmod()
| 7
| 1
|
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ViT does not use
    input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"""feature-extraction""": ViTModel, """image-classification""": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""ViT does not use inputs_embeds""")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""pt""").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        model = ViTModel.from_pretrained("""facebook/dino-vits8""").to(torch_device)

        image_processor = ViTImageProcessor.from_pretrained("""facebook/dino-vits8""", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""pt""")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        # A small test to make sure that inference works in half precision without any problem.
        model = ViTModel.from_pretrained("""facebook/dino-vits8""", torch_dtype=torch.float16, device_map="""auto""")
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""pt""")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
| 124
|
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("""Matrices are not 2x2""")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Split an even-length square matrix into its four quadrants."""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("""Odd matrices are not supported!""")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("""\n""".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursively multiply two square power-of-two matrices with Strassen's seven products."""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            """Unable to multiply these matrices, please check the dimensions.\n"""
            f"""Matrix A: {matrix1}\n"""
            f"""Matrix B: {matrix2}"""
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    # Fast path: 2x2 inputs need no padding or recursion
    if dimension1 == (2, 2) and dimension2 == (2, 2):
        return default_matrix_multiplication(matrix1, matrix2)

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix1 = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
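
    # Sanity note (added): for 2x2 inputs the fast path applies, e.g.
    # strassen([[1, 2], [3, 4]], [[5, 6], [7, 8]]) == [[19, 22], [43, 50]].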
| 124
| 1
|
'''simple docstring'''
def pancake_sort(arr):
    """Sort a list by repeatedly flipping the current maximum to the front, then into place."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi (bring the maximum to the front)
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the first `cur` elements (drop the maximum into its final slot)
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
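
# Worked example (added): pancake_sort([3, 1, 2]) -> [1, 2, 3]. With cur == 3 the max (3)
# is flipped to the front and then to index 2; with cur == 2 the remaining pair is ordered.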
if __name__ == "__main__":
__snake_case : Dict = input('Enter numbers separated by a comma:\n').strip()
__snake_case : Dict = [int(item) for item in user_input.split(',')]
print(pancake_sort(unsorted))
| 363
|
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, oder?''',
}
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
'''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
'''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
'''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
'''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
}
    pair = f"{src_lang}-{tgt_lang}"
    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation.
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
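# A hypothetical single-pair invocation sketch (values are illustrative only):
# calling the generator for one pair writes
# model_cards/facebook/wmt19-en-ru/README.md under the repository root.
# write_model_card(model_cards_dir / "facebook" / "wmt19-en-ru", src_lang="en", tgt_lang="ru")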
| 18
| 0
|
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Creates a second-order low-pass biquad filter.
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Creates a second-order high-pass biquad filter.
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Creates a second-order band-pass biquad filter.
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Creates a second-order all-pass biquad filter.
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Creates a second-order peaking-EQ biquad filter.
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Creates a second-order low-shelf biquad filter.
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Creates a second-order high-shelf biquad filter.
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
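# A minimal usage sketch (assumption: the IIRFilter class from
# audio_filters.iir_filter, imported above, exposes a process(sample) method
# that filters one sample at a time):
# filt = make_lowpass(frequency=1000, samplerate=48000)
# filtered = [filt.process(sample) for sample in [0.0, 1.0, 0.5, -0.5]]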
| 278
|
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3):
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
f"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
)
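# Expected behaviour sketch (a property of the QFT, not output from a run):
# the QFT of the all-zeros state is a uniform superposition, so for 3 qubits
# the counts should be roughly even across all 8 bitstrings, i.e. about 1250
# shots per outcome out of 10000.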
| 278
| 1
|
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """Cleans the model doc table of content: merges duplicates and sorts models alphabetically."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1

    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
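# A small illustration of clean_model_doc_toc (hypothetical data, not from the
# real toctree): duplicate "local" entries with identical titles are merged,
# then everything is sorted by title.
# clean_model_doc_toc([
#     {"local": "model_doc/bert", "title": "BERT"},
#     {"local": "model_doc/albert", "title": "ALBERT"},
#     {"local": "model_doc/bert", "title": "BERT"},
# ])
# -> [{"local": "model_doc/albert", "title": "ALBERT"}, {"local": "model_doc/bert", "title": "BERT"}]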
| 355
|
'''simple docstring'''
class Node:
    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value) -> None:
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None


def create_linked_list() -> None:
    """
    >>> new_linked_list = LinkedList()
    >>> new_linked_list.get_head_data() is None
    True
    >>> new_linked_list.get_tail_data() is None
    True
    >>> new_linked_list.is_empty()
    True
    >>> new_linked_list.insert(10)
    >>> new_linked_list.get_head_data()
    10
    >>> new_linked_list.get_tail_data()
    10
    """


if __name__ == "__main__":
    import doctest

    doctest.testmod()
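# A quick usage sketch of the list above (uses only the classes defined in
# this module):
# linked_list = LinkedList()
# for value in (1, 2, 3):
#     linked_list.insert(value)
# print(linked_list)        # "1 2 3"
# print(2 in linked_list)   # True
# linked_list.delete_value(2)
# print(linked_list)        # "1 3"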
| 16
| 0
|
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
| 33
|
"""simple docstring"""
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
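# Hypothetical command line for the script above (script name and path are
# illustrative only):
# python convert_unclip_txt2img_to_image_variation.py --dump_path ./unclip-image-variation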
| 33
| 1
|
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}


def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens


class CpmAntTokenizer(PreTrainedTokenizer):
    """Construct a CPM-Ant tokenizer, backed by a plain-text vocabulary and jieba pre-segmentation."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string: jieba segmentation followed by wordpiece."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Decode ids into a string, dropping padding and special boundary tokens."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
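# Usage sketch (assumptions: jieba is installed and "vocab.txt" is a local
# CPM-Ant vocabulary file; the identifiers follow the class defined above):
# tokenizer = CpmAntTokenizer("vocab.txt")
# ids = tokenizer.encode("机器学习很有趣")
# text = tokenizer.decode(ids)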
| 86
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
| 86
| 1
|
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowercase_ = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = AlbertTokenizer
lowerCamelCase = AlbertTokenizerFast
lowerCamelCase = True
lowerCamelCase = True
lowerCamelCase = True
def snake_case__ ( self : Dict )-> Any:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
A__ = AlbertTokenizer(lowercase_ )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case__ ( self : List[str],lowercase_ : str )-> Any:
'''simple docstring'''
A__ = 'this is a test'
A__ = 'this is a test'
return input_text, output_text
def snake_case__ ( self : List[Any] )-> Optional[int]:
'''simple docstring'''
A__ = '<pad>'
A__ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ),lowercase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ),lowercase_ )
def snake_case__ ( self : List[str] )-> str:
'''simple docstring'''
A__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0],'<pad>' )
self.assertEqual(vocab_keys[1],'<unk>' )
self.assertEqual(vocab_keys[-1],'▁eloquent' )
self.assertEqual(len(lowercase_ ),3_0_0_0_0 )
def snake_case__ ( self : int )-> List[Any]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size,3_0_0_0_0 )
def snake_case__ ( self : Union[str, Any] )-> List[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
A__ = self.get_tokenizer()
A__ = self.get_rust_tokenizer()
A__ = 'I was born in 92000, and this is falsé.'
A__ = tokenizer.tokenize(lowercase_ )
A__ = rust_tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_,lowercase_ )
A__ = tokenizer.encode(lowercase_,add_special_tokens=lowercase_ )
A__ = rust_tokenizer.encode(lowercase_,add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_,lowercase_ )
A__ = self.get_rust_tokenizer()
A__ = tokenizer.encode(lowercase_ )
A__ = rust_tokenizer.encode(lowercase_ )
self.assertListEqual(lowercase_,lowercase_ )
def snake_case__ ( self : int )-> int:
'''simple docstring'''
A__ = AlbertTokenizer(lowercase_,keep_accents=lowercase_ )
A__ = tokenizer.tokenize('This is a test' )
self.assertListEqual(lowercase_,['▁this', '▁is', '▁a', '▁test'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ),[4_8, 2_5, 2_1, 1_2_8_9] )
A__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowercase_,['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'] )
A__ = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(lowercase_,[3_1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9] )
A__ = tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_,['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'],)
def snake_case__ ( self : Union[str, Any] )-> str:
'''simple docstring'''
A__ = AlbertTokenizer(lowercase_ )
A__ = tokenizer.encode('sequence builders' )
A__ = tokenizer.encode('multi-sequence build' )
A__ = tokenizer.build_inputs_with_special_tokens(lowercase_ )
A__ = tokenizer.build_inputs_with_special_tokens(lowercase_,lowercase_ )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def snake_case__ ( self : Any )-> Tuple:
'''simple docstring'''
A__ = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 2_1_9_7_0, 1_3, 5, 6_0_9_2, 1_6_7, 2_8, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 7_0_2_8, 1_2_0_5_1, 1_8, 1_7, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 3_5_1_5, 1_8_6_8_4, 8, 4_4_6_1, 6, 1_9_2_7, 2_9_7, 8, 1_2_0_6_0, 2_6_0_7, 1_8, 1_3, 5, 4_4_6_1, 1_5, 1_0_5_3_8, 3_8, 8, 1_3_5, 1_5, 8_2_2, 5_8, 1_5, 9_9_3, 1_0_3_6_3, 1_5, 1_4_6_0, 8_0_0_5, 4_4_6_1, 1_5, 9_9_3, 2_5_5, 2_3_2_8, 9, 9, 9, 6, 2_6, 1_1_1_2, 8_1_6, 3_2_6_0, 1_3, 5, 1_0_3, 2_3_7_7, 6, 1_7, 1_1_1_2, 8_1_6, 2_7_8_2, 1_3, 5, 1_0_3, 1_0_6_4_1, 6, 2_9, 8_4, 2_5_1_2, 2_4_3_0, 7_8_2, 1_8_6_8_4, 2_7_6_1, 1_9, 8_0_8, 2_4_3_0, 2_5_5_6, 1_7, 8_5_5, 1_4_8_0, 9_4_7_7, 4_0_9_1, 1_2_8, 1_1_7_1_2, 1_5, 7_1_0_3, 2_1_5_3, 6_7_3, 1_7, 2_4_8_8_3, 9_9_9_0, 9, 3], [2, 1_1_5_0_2, 2_5, 1_0_0_6, 2_0, 7_8_2, 8, 1_1_8_0_9, 8_5_5, 1_7_3_2, 1_9_3_9_3, 1_8_6_6_7, 3_7, 3_6_7, 2_1_0_1_8, 6_9, 1_8_5_4, 3_4, 1_1_8_6_0, 1_9_1_2_4, 2_7, 1_5_6, 2_2_5, 1_7, 1_9_3, 4_1_4_1, 1_9, 6_5, 9_1_2_4, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 1_4, 2_2_3_1, 8_8_6, 2_3_8_5, 1_7_6_5_9, 8_4, 1_4, 1_6_7_9_2, 1_9_5_2, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        expected_encoding = A__  # the dict literal above keeps its original name
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="albert-base-v2",
            revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e",
        )
| 7
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict:
'''simple docstring'''
A__ = []
# fmt: off
# stem
rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.stages.{i}.downsample.reduction.weight', f'backbone.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.stages.{i}.downsample.norm.weight', f'backbone.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.stages.{i}.downsample.norm.bias', f'backbone.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight')
            in_proj_bias = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias')
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :]
            state_dict[f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[:dim]
            state_dict[f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[dim : dim * 2, :]
            state_dict[f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[dim : dim * 2]
            state_dict[f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[-dim:, :]
            state_dict[f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
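# Round-trip sketch for the helpers above (assumption: torch tensors with a
# channel count divisible by 4): reverse_* undoes the matching correct_* reorder.
# t = torch.arange(8.0)
# assert torch.equal(reverse_correct_unfold_norm_order(correct_unfold_norm_order(t)), t)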
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-swin-tiny",
type=str,
choices=[f"""upernet-swin-{size}""" for size in ["tiny", "small", "base", "large"]],
help="Name of the Swin + UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
lowercase_ = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 7
| 1
|
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Logistic sigmoid, applied elementwise."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Sigmoid Linear Unit (SiLU / swish): x * sigmoid(x)."""
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
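# Expected behaviour (easy to verify by hand): sigmoid(0) == 0.5, so
# sigmoid_linear_unit(np.array([0.0])) -> array([0.])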
| 366
|
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 132
| 0
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=224, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14,
        )
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224,
        )
        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=10.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False,
        )[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference,
        )
| 224
|
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
"""simple docstring"""
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18
| 0
|
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
f'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
| 210
|
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
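
# --- Usage sketch (added for illustration, not part of the original file) ---
# A keyed substitution alphabet is built once, then reused for both directions:
#
#     cipher_map = create_cipher_map('Goodbye!!')
#     encoded = encipher('Hello World!!', cipher_map)   # 'CYJJM VMQJB!!'
#     decoded = decipher(encoded, cipher_map)           # 'HELLO WORLD!!'
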
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """Sort a[start:end + 1] in place, returning the number of comparisons."""
    count = 0
    if start < end:
        pivot = randint(start, end)
        # move the randomly chosen pivot to the end of the slice
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    """Partition a[start:end + 1] around a random pivot (Lomuto scheme)."""
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp

    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    'No of Comparisons for 100 elements selected from a standard normal distribution'
    ' is :'
)
print(z)
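
# --- Usage sketch (added for illustration, not part of the original file) ---
# The helpers also work on plain Python lists; the return value is the number
# of element comparisons performed, which varies run to run because the pivot
# is chosen at random:
#
#     data = [5, 1, 4, 2, 3]
#     comparisons = _in_place_quick_sort(data, 0, len(data) - 1)
#     # data is now [1, 2, 3, 4, 5]
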
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class TapasConfig(PretrainedConfig):
    """Configuration class for TAPAS models (a BERT-style encoder with table-specific heads)."""

    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10], initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0,
        positive_label_weight=10.0, num_aggregation_labels=0, aggregation_loss_weight=1.0,
        use_answer_as_supervision=None, answer_loss_importance=1.0, use_normalized_answer_loss=False,
        huber_loss_delta=None, temperature=1.0, aggregation_temperature=1.0, use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False, average_approximation_function="ratio", cell_selection_preference=None,
        answer_loss_cutoff=None, max_num_rows=64, max_num_columns=32, average_logits_per_cell=False,
        select_one_column=True, allow_empty_column_selection=False, init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True, disable_per_token_loss=False, aggregation_labels=None,
        no_aggregation_label_index=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
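
# --- Usage sketch (added for illustration, not part of the original file) ---
# The config instantiates with sensible defaults; fine-tuning knobs are keyword
# overrides, e.g. a WTQ-style weak-supervision setup:
#
#     config = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True)
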
class Graph:
    """Data structure to store graphs (based on adjacency lists)."""

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Make all edge weights distinct so that the MST is unique."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ''
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f'{head} -> {tail} == {weight}\n'
        return string.rstrip('\n')

    def get_edges(self):
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set forest with union by rank and path compression."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Return the minimum spanning tree of ``graph`` via Boruvka's algorithm."""
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
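
# --- Usage sketch (added for illustration, not part of the original file) ---
# Build a small weighted graph and extract its minimum spanning tree:
#
#     g = Graph.build(vertices=[0, 1, 2, 3],
#                     edges=[(0, 1, 1), (0, 2, 1), (2, 3, 1)])
#     g.distinct_weight()          # optional: force unique weights
#     mst = Graph.boruvka_mst(g)
#     print(mst)                   # edges of the spanning tree
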
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(train_dt, train_usr, train_mtch, test_dt, test_mtch):
    """Ordinary least squares prediction from date and match features."""
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])


def sarimax_predictor(train_user, train_match, test_match):
    """Seasonal ARIMA forecast with the match count as exogenous input."""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order
    )
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]


def support_vector_regressor(x_train, x_test, train_user):
    """RBF-kernel support vector regression forecast."""
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]


def interquartile_range_checker(train_user):
    """Return a lower limit derived from the interquartile range."""
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim


def data_safety_checker(list_vote, actual_result):
    """Vote on whether today's actual figure is consistent with the forecasts."""
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe


if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=["total_user", "total_even", "days"]
    )

    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data
    not_str = "" if data_safety_checker(res_vote, tst_user) else "not "
    print(f"Today's data is {not_str}safe.")
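
# --- Note (added for illustration, not part of the original file) ---
# The "vote" above is plain majority agreement: each forecaster emits one
# number, data_safety_checker counts a forecast as unsafe when it exceeds
# today's figure or differs by more than 0.1 in normalized units, and the day
# is flagged safe when the agreeing forecasters outnumber the disagreeing ones.
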
"""simple docstring"""
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = """
import os
"""

IMPORT_IN_FUNCTION = """
def foo():
    import os
    return False
"""

DEEPLY_NESTED_IMPORT = """
def foo():
    def bar():
        if True:
            import os
        return False
    return bar()
"""

TOP_LEVEL_TRY_IMPORT = """
import os

try:
    import bar
except ImportError:
    raise ValueError()
"""

TRY_IMPORT_IN_FUNCTION = """
import os

def foo():
    try:
        import bar
    except ImportError:
        raise ValueError()
"""

MULTIPLE_EXCEPTS_IMPORT = """
import os

try:
    import bar
except (ImportError, AttributeError):
    raise ValueError()
"""

EXCEPT_AS_IMPORT = """
import os

try:
    import bar
except ImportError as e:
    raise ValueError()
"""

GENERIC_EXCEPT_IMPORT = """
import os

try:
    import bar
except:
    raise ValueError()
"""

MULTILINE_TRY_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    raise ValueError()
"""

MULTILINE_BOTH_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    x = 1
    raise ValueError()
"""

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
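
# --- Note (added for illustration, not part of the original file) ---
# Every case above ultimately needs only `os` at runtime: imports guarded by a
# try/except are treated as optional dependencies, so get_imports reports just
# the hard dependency and each parametrized case asserts the result is ["os"].
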
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Map each value into the open interval (0, 1)."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Sigmoid Linear Unit (SiLU / swish): x * sigmoid(x)."""
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
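
# --- Usage sketch (added for illustration, not part of the original file) ---
#
#     sigmoid_linear_unit(np.array([-1.0, 0.0, 1.0]))
#     # array([-0.26894142,  0.        ,  0.73105858])
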
"""simple docstring"""
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(
    model, model_args: tuple, output_path: Path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset)
    else:
        export(model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset)
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False) -> None:
    """Export each module of a Stable Diffusion pipeline to ONNX."""
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError('`float16` model export is only supported on GPUs with CUDA')
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)

    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        'A sample prompt', padding='max_length', max_length=pipeline.tokenizer.model_max_length, truncation=True, return_tensors='pt', )
    onnx_export(
        pipeline.text_encoder, model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)), output_path=output_path / 'text_encoder' / 'model.onnx', ordered_input_names=['input_ids'], output_names=['last_hidden_state', 'pooler_output'], dynamic_axes={
            'input_ids': {0: 'batch', 1: 'sequence'},
        }, opset=opset, )
    del pipeline.text_encoder

    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / 'unet' / 'model.onnx'
    onnx_export(
        pipeline.unet, model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ), output_path=unet_path, ordered_input_names=['sample', 'timestep', 'encoder_hidden_states', 'return_dict'], output_names=['out_sample'], dynamic_axes={
            'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
            'timestep': {0: 'batch'},
            'encoder_hidden_states': {0: 'batch', 1: 'sequence'},
        }, opset=opset, use_external_data_format=True, )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet, unet_model_path, save_as_external_data=True, all_tensors_to_one_file=True, location='weights.pb', convert_attribute=False, )
    del pipeline.unet

    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder, model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ), output_path=output_path / 'vae_encoder' / 'model.onnx', ordered_input_names=['sample', 'return_dict'], output_names=['latent_sample'], dynamic_axes={
            'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
        }, opset=opset, )

    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder, model_args=(
            torch.randn(1, vae_latent_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            False,
        ), output_path=output_path / 'vae_decoder' / 'model.onnx', ordered_input_names=['latent_sample', 'return_dict'], output_names=['sample'], dynamic_axes={
            'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
        }, opset=opset, )
    del pipeline.vae

    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker, model_args=(
                torch.randn(1, clip_num_channels, clip_image_size, clip_image_size).to(device=device, dtype=dtype),
                torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels).to(device=device, dtype=dtype),
            ), output_path=output_path / 'safety_checker' / 'model.onnx', ordered_input_names=['clip_input', 'images'], output_names=['out_images', 'has_nsfw_concepts'], dynamic_axes={
                'clip_input': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
                'images': {0: 'batch', 1: 'height', 2: 'width', 3: 'channels'},
            }, opset=opset, )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / 'safety_checker')
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None

    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_encoder'), vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_decoder'), text_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'text_encoder'), tokenizer=pipeline.tokenizer, unet=OnnxRuntimeModel.from_pretrained(output_path / 'unet'), scheduler=pipeline.scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=safety_checker is not None, )

    onnx_pipeline.save_pretrained(output_path)
    print('ONNX pipeline saved to', output_path)

    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider='CPUExecutionProvider')
    print('ONNX pipeline is loadable')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
args = parser.parse_args()

convert_models(args.model_path, args.output_path, args.opset, args.fp16)
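
# --- Example invocation (added for illustration; the model id and output
# --- directory below are placeholders, not values from the original file) ---
#
#     python convert_stable_diffusion_checkpoint_to_onnx.py \
#         --model_path runwayml/stable-diffusion-v1-5 \
#         --output_path ./sd_onnx --opset 14
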
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
)
class TFDebertaV2ModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True,
        use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4,
        intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
        relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaV2Config(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, initializer_range=self.initializer_range, return_dict=True, )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2Model(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict


@require_tf
class TFDebertaV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaV2Model,
            "fill-mask": TFDebertaV2ForMaskedLM,
            "question-answering": TFDebertaV2ForQuestionAnswering,
            "text-classification": TFDebertaV2ForSequenceClassification,
            "token-classification": TFDebertaV2ForTokenClassification,
            "zero-shot": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaV2Model.from_pretrained('kamalkraj/deberta-v2-xlarge')
        self.assertIsNotNone(model)


@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason='Model not available yet')
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaV2Model.from_pretrained('kamalkraj/deberta-v2-xlarge')
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)

import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})


class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=dataset_format, )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
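
# --- Usage sketch (added for illustration, not part of the original file;
# --- the data_dir path below is hypothetical) ---
#
#     from transformers import AutoTokenizer
#
#     args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     train_dataset = SquadDataset(args, tokenizer, mode="train")
#     batch = train_dataset[0]  # dict with input_ids / attention_mask / ...
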
"""simple docstring"""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))


def init_hf_modules():
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name):
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def get_relative_imports(module_file):
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))


def get_relative_import_files(module_file):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f'{f}.py' for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports


def check_imports(filename):
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)


def get_class_in_module(class_name, module_path):
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class


def get_cached_module_file(pretrained_model_name_or_path, module_file, cache_dir=None, force_download=False, resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False, ):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=False, )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path, f"{module_needed}.py", cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    return os.path.join(full_submodule, module_file)


def get_class_from_dynamic_module(pretrained_model_name_or_path, module_file, class_name=None, cache_dir=None, force_download=False, resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs, ):
    final_module = get_cached_module_file(
        pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
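
# --- Usage sketch (added for illustration, not part of the original file) ---
# Resolving a community pipeline class by name; since the repo id contains no
# "/", the file is fetched from the diffusers examples/community folder on
# GitHub, cached as a dynamic module, and imported:
#
#     pipeline_cls = get_class_from_dynamic_module(
#         "clip_guided_stable_diffusion",
#         module_file="clip_guided_stable_diffusion.py",
#     )
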
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        # creates estimator
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=f"{self.env.base_job_name}-single", instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path}, metric_definitions=self.env.metric_definitions, py_version="py36", )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        # create estimator
        estimator = self.create_estimator()

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)

from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    """Output of `TransformerTemporalModel`: the residual-added hidden states."""

    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, activation_fn: str = "geglu", norm_elementwise_affine: bool = True, double_self_attention: bool = True, ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, attention_bias=attention_bias, double_self_attention=double_self_attention, norm_elementwise_affine=norm_elementwise_affine, )
                for d in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, class_labels=None, num_frames=1, cross_attention_kwargs=None, return_dict: bool = True, ):
        # 1. Input: frames are flattened into the batch dimension
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels, )

        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
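
# --- Usage sketch (added for illustration, not part of the original file) ---
# The model expects video frames flattened into the batch dimension:
#
#     model = TransformerTemporalModel(in_channels=32)
#     frames = torch.randn(2 * 8, 32, 16, 16)        # batch=2, num_frames=8
#     out = model(frames, num_frames=8).sample       # same shape as the input
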
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def UpperCAmelCase ( lowercase ):
"""simple docstring"""
__lowercase = _re_indent.search(lowercase )
return "" if search is None else search.groups()[0]
def UpperCAmelCase ( lowercase , lowercase="" , lowercase=None , lowercase=None ):
"""simple docstring"""
__lowercase = 0
__lowercase = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(lowercase ):
index += 1
__lowercase = ['''\n'''.join(lines[:index] )]
else:
__lowercase = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__lowercase = [lines[index]]
index += 1
while index < len(lowercase ) and (end_prompt is None or not lines[index].startswith(lowercase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowercase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(lowercase ) )
if index < len(lowercase ) - 1:
__lowercase = [lines[index + 1]]
index += 1
else:
__lowercase = []
else:
blocks.append('''\n'''.join(lowercase ) )
__lowercase = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowercase ) > 0:
blocks.append('''\n'''.join(lowercase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowercase ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def UpperCAmelCase ( lowercase ):
"""simple docstring"""
def _inner(lowercase ):
return key(lowercase ).lower().replace('''_''' , '''''' )
return _inner
def UpperCAmelCase ( lowercase , lowercase=None ):
"""simple docstring"""
    def noop(x ):
        return x
if key is None:
__lowercase = noop
# Constants are all uppercase, they go first.
__lowercase = [obj for obj in objects if key(lowercase ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
__lowercase = [obj for obj in objects if key(lowercase )[0].isupper() and not key(lowercase ).isupper()]
# Functions begin with a lowercase, they go last.
__lowercase = [obj for obj in objects if not key(lowercase )[0].isupper()]
__lowercase = ignore_underscore(lowercase )
return sorted(lowercase , key=lowercase ) + sorted(lowercase , key=lowercase ) + sorted(lowercase , key=lowercase )
def UpperCAmelCase ( lowercase ):
"""simple docstring"""
def _replace(lowercase ):
__lowercase = match.groups()[0]
if "," not in imports:
return F"[{imports}]"
__lowercase = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__lowercase = keys[:-1]
return "[" + ", ".join([F"\"{k}\"" for k in sort_objects(lowercase )] ) + "]"
__lowercase = import_statement.split('''\n''' )
if len(lowercase ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
__lowercase = 2 if lines[1].strip() == '''[''' else 1
__lowercase = [(i, _re_strip_line.search(lowercase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        __lowercase = sort_objects(lowercase , key=lambda x : x[1] )
__lowercase = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lowercase ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
__lowercase = _re_bracket_content.sub(_replace , lines[1] )
else:
__lowercase = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__lowercase = keys[:-1]
__lowercase = get_indent(lines[1] ) + ''', '''.join([F"\"{k}\"" for k in sort_objects(lowercase )] )
return "\n".join(lowercase )
else:
# Finally we have to deal with imports fitting on one line
__lowercase = _re_bracket_content.sub(_replace , lowercase )
return import_statement
def UpperCAmelCase ( lowercase , lowercase=True ):
"""simple docstring"""
with open(lowercase , encoding='''utf-8''' ) as f:
__lowercase = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
__lowercase = split_code_in_indented_blocks(
lowercase , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(lowercase ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
__lowercase = main_blocks[block_idx]
__lowercase = block.split('''\n''' )
# Get to the start of the imports.
__lowercase = 0
while line_idx < len(lowercase ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
__lowercase = len(lowercase )
else:
line_idx += 1
if line_idx >= len(lowercase ):
continue
# Ignore beginning and last line: they don't contain anything.
__lowercase = '''\n'''.join(block_lines[line_idx:-1] )
__lowercase = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
__lowercase = split_code_in_indented_blocks(lowercase , indent_level=lowercase )
# We have two categories of import key: list or _import_structure[key].append/extend
__lowercase = _re_direct_key if '''_import_structure = {''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
        __lowercase = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
__lowercase = [(i, key) for i, key in enumerate(lowercase ) if key is not None]
        __lowercase = [x[0] for x in sorted(lowercase , key=lambda x : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
__lowercase = 0
__lowercase = []
for i in range(len(lowercase ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
__lowercase = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(lowercase )
count += 1
# And we put our main block back together with its first and last line.
__lowercase = '''\n'''.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(lowercase ):
if check_only:
return True
else:
print(F"Overwriting {file}." )
with open(lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(lowercase ) )
def UpperCAmelCase ( lowercase=True ):
"""simple docstring"""
__lowercase = []
for root, _, files in os.walk(lowercase ):
if "__init__.py" in files:
__lowercase = sort_imports(os.path.join(lowercase , '''__init__.py''' ) , check_only=lowercase )
if result:
__lowercase = [os.path.join(lowercase , '''__init__.py''' )]
if len(lowercase ) > 0:
raise ValueError(F"Would overwrite {len(lowercase )} files, run `make style`." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
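# Usage sketch (assumption: this is the obfuscated counterpart of transformers'
# utils/custom_init_isort.py, which sorts the `_import_structure` blocks of every
# __init__.py under src/transformers). Run from the repository root:
#
#   python utils/custom_init_isort.py                # rewrite badly sorted inits in place
#   python utils/custom_init_isort.py --check_only   # only report; raises if a rewrite is needed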
| 210
|
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _UpperCamelCase ( _UpperCAmelCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''num_attention_heads''' ) )
class _UpperCamelCase :
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=64 , lowerCAmelCase__=3 , lowerCAmelCase__=3 , lowerCAmelCase__=2 , lowerCAmelCase__=1 , lowerCAmelCase__=16 , lowerCAmelCase__=[1_28, 2_56, 3_84] , lowerCAmelCase__=[4, 6, 8] , lowerCAmelCase__=[2, 3, 4] , lowerCAmelCase__=[16, 16, 16] , lowerCAmelCase__=0 , lowerCAmelCase__=[2, 2, 2] , lowerCAmelCase__=[2, 2, 2] , lowerCAmelCase__=0.02 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=2 , ) -> Tuple:
'''simple docstring'''
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = num_channels
__lowercase = kernel_size
__lowercase = stride
__lowercase = padding
__lowercase = hidden_sizes
__lowercase = num_attention_heads
__lowercase = depths
__lowercase = key_dim
__lowercase = drop_path_rate
__lowercase = patch_size
__lowercase = attention_ratio
__lowercase = mlp_ratio
__lowercase = initializer_range
__lowercase = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
__lowercase = is_training
__lowercase = use_labels
__lowercase = num_labels
__lowercase = initializer_range
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.num_labels )
__lowercase = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
__lowercase = LevitModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__lowercase = model(lowerCAmelCase__ )
__lowercase = (self.image_size, self.image_size)
__lowercase , __lowercase = image_size[0], image_size[1]
for _ in range(4 ):
__lowercase = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
__lowercase = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
'''simple docstring'''
__lowercase = self.num_labels
__lowercase = LevitForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__lowercase = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( _UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
__a : int = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
__a : List[str] = (
{
'''feature-extraction''': LevitModel,
'''image-classification''': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
__a : int = False
__a : Dict = False
__a : Optional[Any] = False
__a : Optional[int] = False
__a : Dict = False
def _SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
__lowercase = LevitModelTester(self )
__lowercase = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
return
@unittest.skip(reason='''Levit does not use inputs_embeds''' )
def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''Levit does not support input and output embeddings''' )
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason='''Levit does not output attentions''' )
def _SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowerCAmelCase__ )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
def check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
__lowercase = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__lowercase = outputs.hidden_states
__lowercase = len(self.model_tester.depths ) + 1
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
__lowercase = (self.model_tester.image_size, self.model_tester.image_size)
__lowercase , __lowercase = image_size[0], image_size[1]
for _ in range(4 ):
__lowercase = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
__lowercase = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ) -> str:
'''simple docstring'''
__lowercase = super()._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
if not self.model_tester.is_training:
return
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(lowerCAmelCase__ )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
__lowercase = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.train()
__lowercase = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
__lowercase = model(**lowerCAmelCase__ ).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__lowercase = False
__lowercase = True
for model_class in self.all_model_classes:
if model_class in get_values(lowerCAmelCase__ ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
__lowercase = model_class(lowerCAmelCase__ )
model.gradient_checkpointing_enable()
model.to(lowerCAmelCase__ )
model.train()
__lowercase = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
__lowercase = model(**lowerCAmelCase__ ).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(lowerCAmelCase__ ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"Testing {model_class} with {problem_type['title']}" ):
__lowercase = problem_type['''title''']
__lowercase = problem_type['''num_labels''']
__lowercase = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.train()
__lowercase = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
if problem_type["num_labels"] > 1:
__lowercase = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] )
__lowercase = inputs['''labels'''].to(problem_type['''dtype'''] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=lowerCAmelCase__ ) as warning_list:
__lowercase = model(**lowerCAmelCase__ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"Something is going wrong in the regression problem: intercepted {w.message}" )
loss.backward()
@slow
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = LevitModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def prepare_img( ):
"""simple docstring"""
__lowercase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
__lowercase = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
lowerCAmelCase__ )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=lowerCAmelCase__ , return_tensors='''pt''' ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
__lowercase = model(**lowerCAmelCase__ )
# verify the logits
__lowercase = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
__lowercase = torch.tensor([1.0448, -0.3745, -1.8317] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
| 210
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase : List[Any] = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : int = ['''XLMRobertaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = ['''XLMRobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[int] = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[int] = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
__lowerCamelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
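# Usage sketch: with the `_LazyModule` pattern above, the heavy framework-specific
# submodules are imported only on first attribute access. Assuming the original
# transformers package layout, both of these resolve lazily at runtime:
#
#   from transformers import XLMRobertaConfig
#   from transformers.models.xlm_roberta import XLMRobertaTokenizer  # needs sentencepiece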
| 351
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase : Optional[int] = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[Any] = ['''WhisperTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Tuple = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : List[Any] = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
__lowerCamelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 204
| 0
|
"""simple docstring"""
import random
class Onepad :
    """simple docstring"""
    @staticmethod
    def encrypt ( text: str ):
        """Encrypt ``text`` into a (cipher, key) pair of integer lists."""
        plain = [ord(i ) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1 , 3_00 )
            c = (i + k) * k
            cipher.append(c )
            key.append(k )
        return cipher, key
    @staticmethod
    def decrypt ( cipher: list[int] , key: list[int] ):
        """Invert ``encrypt``: (cipher[i] - key[i] ** 2) / key[i] recovers each code point."""
        plain = []
        for i in range(len(cipher ) ):
            p = int((cipher[i] - (key[i]) ** 2) / key[i] )
            plain.append(chr(p ) )
        return "".join(plain )
if __name__ == "__main__":
    c, k = Onepad().encrypt('Hello')
    print(c, k)
    print(Onepad().decrypt(c, k))
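# Round-trip sketch for the cipher above: decrypt(encrypt(text)) recovers the text,
# since decrypt computes (cipher[i] - key[i] ** 2) / key[i] = plain[i]:
#
#   cipher, key = Onepad().encrypt("attack at dawn")
#   assert Onepad().decrypt(cipher, key) == "attack at dawn"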
| 335
|
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two ( __magic_name__ : int ) -> int:
    """simple docstring"""
    return __magic_name__ + 2
class A ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase__ (self : Optional[Any] ) -> Any:
"""simple docstring"""
lowercase__ = """x = 3"""
lowercase__ = {}
lowercase__ = evaluate(_UpperCAmelCase , {} , state=_UpperCAmelCase )
assert result == 3
self.assertDictEqual(_UpperCAmelCase , {"""x""": 3} )
lowercase__ = """x = y"""
lowercase__ = {"""y""": 5}
lowercase__ = evaluate(_UpperCAmelCase , {} , state=_UpperCAmelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_UpperCAmelCase , {"""x""": 5, """y""": 5} )
def lowerCamelCase__ (self : str ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = """y = add_two(x)"""
lowercase__ = {"""x""": 3}
lowercase__ = evaluate(_UpperCAmelCase , {"""add_two""": add_two} , state=_UpperCAmelCase )
assert result == 5
self.assertDictEqual(_UpperCAmelCase , {"""x""": 3, """y""": 5} )
# Won't work without the tool
with CaptureStdout() as out:
lowercase__ = evaluate(_UpperCAmelCase , {} , state=_UpperCAmelCase )
assert result is None
assert "tried to execute add_two" in out.out
def lowerCamelCase__ (self : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = """x = 3"""
lowercase__ = {}
lowercase__ = evaluate(_UpperCAmelCase , {} , state=_UpperCAmelCase )
assert result == 3
self.assertDictEqual(_UpperCAmelCase , {"""x""": 3} )
def lowerCamelCase__ (self : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowercase__ = """test_dict = {'x': x, 'y': add_two(x)}"""
lowercase__ = {"""x""": 3}
lowercase__ = evaluate(_UpperCAmelCase , {"""add_two""": add_two} , state=_UpperCAmelCase )
self.assertDictEqual(_UpperCAmelCase , {"""x""": 3, """y""": 5} )
self.assertDictEqual(_UpperCAmelCase , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )
def lowerCamelCase__ (self : List[str] ) -> List[Any]:
"""simple docstring"""
lowercase__ = """x = 3\ny = 5"""
lowercase__ = {}
lowercase__ = evaluate(_UpperCAmelCase , {} , state=_UpperCAmelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_UpperCAmelCase , {"""x""": 3, """y""": 5} )
def lowerCamelCase__ (self : List[Any] ) -> Dict:
"""simple docstring"""
lowercase__ = """text = f'This is x: {x}.'"""
lowercase__ = {"""x""": 3}
lowercase__ = evaluate(_UpperCAmelCase , {} , state=_UpperCAmelCase )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(_UpperCAmelCase , {"""x""": 3, """text""": """This is x: 3."""} )
def lowerCamelCase__ (self : List[str] ) -> int:
"""simple docstring"""
lowercase__ = """if x <= 3:\n y = 2\nelse:\n y = 5"""
lowercase__ = {"""x""": 3}
lowercase__ = evaluate(_UpperCAmelCase , {} , state=_UpperCAmelCase )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(_UpperCAmelCase , {"""x""": 3, """y""": 2} )
lowercase__ = {"""x""": 8}
lowercase__ = evaluate(_UpperCAmelCase , {} , state=_UpperCAmelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_UpperCAmelCase , {"""x""": 8, """y""": 5} )
def lowerCamelCase__ (self : Dict ) -> int:
"""simple docstring"""
lowercase__ = """test_list = [x, add_two(x)]"""
lowercase__ = {"""x""": 3}
lowercase__ = evaluate(_UpperCAmelCase , {"""add_two""": add_two} , state=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , [3, 5] )
self.assertDictEqual(_UpperCAmelCase , {"""x""": 3, """test_list""": [3, 5]} )
def lowerCamelCase__ (self : Any ) -> int:
"""simple docstring"""
lowercase__ = """y = x"""
lowercase__ = {"""x""": 3}
lowercase__ = evaluate(_UpperCAmelCase , {} , state=_UpperCAmelCase )
assert result == 3
self.assertDictEqual(_UpperCAmelCase , {"""x""": 3, """y""": 3} )
def lowerCamelCase__ (self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ = """test_list = [x, add_two(x)]\ntest_list[1]"""
lowercase__ = {"""x""": 3}
lowercase__ = evaluate(_UpperCAmelCase , {"""add_two""": add_two} , state=_UpperCAmelCase )
assert result == 5
self.assertDictEqual(_UpperCAmelCase , {"""x""": 3, """test_list""": [3, 5]} )
lowercase__ = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"""
lowercase__ = {"""x""": 3}
lowercase__ = evaluate(_UpperCAmelCase , {"""add_two""": add_two} , state=_UpperCAmelCase )
assert result == 5
self.assertDictEqual(_UpperCAmelCase , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )
def lowerCamelCase__ (self : Union[str, Any] ) -> Any:
"""simple docstring"""
lowercase__ = """x = 0\nfor i in range(3):\n x = i"""
lowercase__ = {}
lowercase__ = evaluate(_UpperCAmelCase , {"""range""": range} , state=_UpperCAmelCase )
assert result == 2
self.assertDictEqual(_UpperCAmelCase , {"""x""": 2, """i""": 2} )
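# A minimal sketch of the interpreter under test, using the same `evaluate` API as above:
#
#   state = {}
#   result = evaluate("x = 1\ny = x + 1", {}, state=state)
#   # `evaluate` returns the value of the last assignment: result == 2, state == {"x": 1, "y": 2}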
| 305
| 0
|
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force ( magnitude: float , angle: float , radian_mode: bool = False ) -> list[float]:
    """Resolve a force given as (magnitude, angle) into its [x, y] components."""
    if radian_mode:
        return [magnitude * cos(angle ), magnitude * sin(angle )]
    return [magnitude * cos(radians(angle ) ), magnitude * sin(radians(angle ) )]
def in_static_equilibrium ( forces: NDArray[float64] , location: NDArray[float64] , eps: float = 10**-1 ) -> bool:
    """Check that the net moment of ``forces`` applied at ``location`` is (nearly) zero."""
    moments: NDArray[float64] = cross(location , forces )
    sum_moments: float = sum(moments )
    return abs(sum_moments ) < eps
if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2_000], [0, -1_200], [0, 15_600], [0, -12_400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)
    import doctest
    doctest.testmod()
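# Worked example (a quick sketch): a 10 N force at 60 degrees resolves into
# [10 * cos(60deg), 10 * sin(60deg)], i.e. roughly [5.0, 8.66]:
#
#   polar_force(10, 60)  # -> approximately [5.0, 8.660254037844386]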
| 369
|
"""simple docstring"""
from math import log2
def _lowercase ( a: int ) -> int:
    if not isinstance(a , int ):
        raise TypeError("""Input value must be an 'int' type""" )
    if a < 0:
        raise ValueError("""Input value must be a positive integer""" )
    return 0 if (a == 0) else int(log2(a & -a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
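# Example values (a quick sketch): the function returns the 0-indexed position of the
# lowest set bit, via the two's-complement identity a & -a:
#
#   _lowercase(1)   # 0b1    -> 0
#   _lowercase(8)   # 0b1000 -> 3
#   _lowercase(12)  # 0b1100 -> 2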
| 56
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
UpperCAmelCase__ : Optional[Any] = None
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase__ : str = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase__ : Dict = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
UpperCAmelCase__ : List[Any] = {
'facebook/nllb-large-en-ro': 1_0_2_4,
'facebook/nllb-200-distilled-600M': 1_0_2_4,
}
# fmt: off
UpperCAmelCase__ : Any = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Tuple = VOCAB_FILES_NAMES
__UpperCamelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : str = ['''input_ids''', '''attention_mask''']
__UpperCamelCase : Tuple = NllbTokenizer
__UpperCamelCase : List[int] = []
__UpperCamelCase : List[int] = []
def __init__(self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="<mask>" , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=False , **SCREAMING_SNAKE_CASE__ , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token
SCREAMING_SNAKE_CASE__ : int = legacy_behaviour
super().__init__(
vocab_file=SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , src_lang=SCREAMING_SNAKE_CASE__ , tgt_lang=SCREAMING_SNAKE_CASE__ , additional_special_tokens=SCREAMING_SNAKE_CASE__ , legacy_behaviour=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
SCREAMING_SNAKE_CASE__ : str = vocab_file
SCREAMING_SNAKE_CASE__ : Union[str, Any] = False if not self.vocab_file else True
SCREAMING_SNAKE_CASE__ : List[str] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
SCREAMING_SNAKE_CASE__ : Dict = {
lang_code: self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
SCREAMING_SNAKE_CASE__ : Any = src_lang if src_lang is not None else """eng_Latn"""
SCREAMING_SNAKE_CASE__ : Dict = self.convert_tokens_to_ids(self._src_lang )
SCREAMING_SNAKE_CASE__ : Optional[int] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __magic_name__ (self ) -> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = src_lang
SCREAMING_SNAKE_CASE__ : Dict = self(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = tgt_lang_id
return inputs
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = "eng_Latn" , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "fra_Latn" , **SCREAMING_SNAKE_CASE__ , ) -> BatchEncoding:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = src_lang
SCREAMING_SNAKE_CASE__ : List[Any] = tgt_lang
return super().prepare_seqaseq_batch(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def __magic_name__ (self ) -> int:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
if self.legacy_behaviour:
SCREAMING_SNAKE_CASE__ : Any = []
SCREAMING_SNAKE_CASE__ : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = [self.cur_lang_code]
SCREAMING_SNAKE_CASE__ : List[str] = [self.eos_token_id]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
SCREAMING_SNAKE_CASE__ : List[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
if self.legacy_behaviour:
SCREAMING_SNAKE_CASE__ : str = []
SCREAMING_SNAKE_CASE__ : Optional[int] = [self.eos_token_id, self.cur_lang_code]
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [self.cur_lang_code]
SCREAMING_SNAKE_CASE__ : int = [self.eos_token_id]
SCREAMING_SNAKE_CASE__ : str = self.convert_ids_to_tokens(self.prefix_tokens )
SCREAMING_SNAKE_CASE__ : Tuple = self.convert_ids_to_tokens(self.suffix_tokens )
SCREAMING_SNAKE_CASE__ : List[str] = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory.''' )
return
SCREAMING_SNAKE_CASE__ : Any = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
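# Usage sketch (assumption: the class above is an obfuscated copy of transformers'
# NllbTokenizerFast). The source language code is added as a special prefix token
# (as a suffix under `legacy_behaviour`) automatically:
#
#   tok = NllbTokenizerFast.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#   )
#   inputs = tok("Hello world", return_tensors="pt")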
| 25
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__: Any = logging.get_logger(__name__)
A__: List[str] = {
'''google/pegasus-large''': '''https://huggingface.co/google/pegasus-large/resolve/main/config.json''',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class _a ( UpperCamelCase__):
"""simple docstring"""
UpperCamelCase__ = """pegasus"""
UpperCamelCase__ = ["""past_key_values"""]
UpperCamelCase__ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self: List[str] , __lowerCamelCase: Dict=5_0265 , __lowerCamelCase: int=1024 , __lowerCamelCase: Dict=12 , __lowerCamelCase: Dict=4096 , __lowerCamelCase: str=16 , __lowerCamelCase: List[Any]=12 , __lowerCamelCase: int=4096 , __lowerCamelCase: Optional[Any]=16 , __lowerCamelCase: Union[str, Any]=0.0 , __lowerCamelCase: int=0.0 , __lowerCamelCase: List[str]=True , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: List[Any]="gelu" , __lowerCamelCase: List[Any]=1024 , __lowerCamelCase: int=0.1 , __lowerCamelCase: Union[str, Any]=0.0 , __lowerCamelCase: Union[str, Any]=0.0 , __lowerCamelCase: Any=0.02 , __lowerCamelCase: Union[str, Any]=0 , __lowerCamelCase: str=False , __lowerCamelCase: Optional[int]=0 , __lowerCamelCase: Optional[Any]=1 , __lowerCamelCase: Optional[int]=1 , **__lowerCamelCase: Union[str, Any] , ):
'''simple docstring'''
UpperCamelCase__: Union[str, Any] = vocab_size
UpperCamelCase__: List[Any] = max_position_embeddings
UpperCamelCase__: Tuple = d_model
UpperCamelCase__: str = encoder_ffn_dim
UpperCamelCase__: Optional[int] = encoder_layers
UpperCamelCase__: List[Any] = encoder_attention_heads
UpperCamelCase__: Tuple = decoder_ffn_dim
UpperCamelCase__: int = decoder_layers
UpperCamelCase__: List[str] = decoder_attention_heads
UpperCamelCase__: int = dropout
UpperCamelCase__: List[str] = attention_dropout
UpperCamelCase__: Tuple = activation_dropout
UpperCamelCase__: Optional[int] = activation_function
UpperCamelCase__: Dict = init_std
UpperCamelCase__: Optional[Any] = encoder_layerdrop
UpperCamelCase__: Any = decoder_layerdrop
UpperCamelCase__: Optional[int] = use_cache
UpperCamelCase__: Optional[int] = encoder_layers
UpperCamelCase__: List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , is_encoder_decoder=__lowerCamelCase , decoder_start_token_id=__lowerCamelCase , forced_eos_token_id=__lowerCamelCase , **__lowerCamelCase , )
@property
def UpperCAmelCase_ ( self: Any ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def UpperCAmelCase_ ( self: Tuple ):
'''simple docstring'''
return self.d_model
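# Usage sketch: the class above mirrors transformers' PegasusConfig; assuming that API,
# a small configuration is built with the same keyword arguments the __init__ stores,
# and the two properties at the end are (in the original) the `num_attention_heads`
# and `hidden_size` aliases:
#
#   config = PegasusConfig(d_model=512, encoder_layers=6, decoder_layers=6)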
| 149
| 0
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class _SCREAMING_SNAKE_CASE ( _a ):
snake_case__ : Optional[Any] = ["""image_processor""", """tokenizer"""]
snake_case__ : Optional[Any] = """OwlViTImageProcessor"""
snake_case__ : Any = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self : Optional[Any] , __lowerCamelCase : str=None , __lowerCamelCase : List[str]=None , **__lowerCamelCase : Union[str, Any] ):
UpperCamelCase :Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __lowerCamelCase , )
UpperCamelCase :Union[str, Any] = kwargs.pop("""feature_extractor""" )
UpperCamelCase :int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__lowerCamelCase , __lowerCamelCase )
def __call__( self : int , __lowerCamelCase : int=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Tuple=None , __lowerCamelCase : List[Any]="max_length" , __lowerCamelCase : Union[str, Any]="np" , **__lowerCamelCase : Optional[Any] ):
if text is None and query_images is None and images is None:
raise ValueError(
"""You have to specify at least one text or query image or image. All three cannot be none.""" )
if text is not None:
            if isinstance(text , str ) or (isinstance(text , list ) and not isinstance(text[0] , list )):
                UpperCamelCase :List[str] = [self.tokenizer(text , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase )]
            elif isinstance(text , list ) and isinstance(text[0] , list ):
                UpperCamelCase :Tuple = []
                # Maximum number of queries across batch
                UpperCamelCase :Tuple = max([len(t ) for t in text] )
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t ) != max_num_queries:
                        UpperCamelCase :Union[str, Any] = t + [""" """] * (max_num_queries - len(t ))
                    UpperCamelCase :List[Any] = self.tokenizer(t , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase )
                    encodings.append(encoding )
else:
raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" )
if return_tensors == "np":
UpperCamelCase :Optional[Any] = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
UpperCamelCase :int = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
UpperCamelCase :Union[str, Any] = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
UpperCamelCase :Any = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
UpperCamelCase :Optional[int] = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 )
UpperCamelCase :Optional[int] = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
UpperCamelCase :Dict = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
UpperCamelCase :List[str] = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
else:
raise ValueError("""Target return tensor type could not be returned""" )
UpperCamelCase :int = BatchEncoding()
UpperCamelCase :Any = input_ids
UpperCamelCase :Tuple = attention_mask
if query_images is not None:
UpperCamelCase :str = BatchEncoding()
UpperCamelCase :Dict = self.image_processor(
__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase ).pixel_values
UpperCamelCase :Optional[int] = query_pixel_values
if images is not None:
UpperCamelCase :Optional[Any] = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase )
if text is not None and images is not None:
UpperCamelCase :List[Any] = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
UpperCamelCase :List[Any] = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowerCamelCase ) , tensor_type=__lowerCamelCase )
def _A ( self : Optional[int] , *__lowerCamelCase : str , **__lowerCamelCase : int ):
return self.image_processor.post_process(*__lowerCamelCase , **__lowerCamelCase )
def _A ( self : List[str] , *__lowerCamelCase : int , **__lowerCamelCase : Union[str, Any] ):
return self.image_processor.post_process_object_detection(*__lowerCamelCase , **__lowerCamelCase )
def _A ( self : str , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : List[Any] ):
return self.image_processor.post_process_image_guided_detection(*__lowerCamelCase , **__lowerCamelCase )
def _A ( self : int , *__lowerCamelCase : int , **__lowerCamelCase : List[Any] ):
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def _A ( self : Union[str, Any] , *__lowerCamelCase : str , **__lowerCamelCase : List[str] ):
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
@property
def _A ( self : Dict ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __lowerCamelCase , )
return self.image_processor_class
@property
def _A ( self : List[Any] ):
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __lowerCamelCase , )
return self.image_processor
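# Usage sketch (assumption: the class above is an obfuscated copy of transformers'
# OwlViTProcessor). Zero-shot detection inputs combine per-image text queries with pixels:
#
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]],
#                      images=image, return_tensors="pt")
#   # inputs: input_ids, attention_mask, pixel_values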
| 62
|
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
        ('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name( class_name: str ):
    """simple docstring"""
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name )
            module = importlib.import_module(f""".{module_name}""" , """transformers.models""" )
            try:
                return getattr(module , class_name )
            except AttributeError:
                continue
    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor , """__name__""" , None ) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("""transformers""" )
    if hasattr(main_module , class_name ):
        return getattr(main_module , class_name )
    return None
def get_image_processor_config( pretrained_model_name_or_path: Union[str, os.PathLike] , cache_dir: Optional[Union[str, os.PathLike]] = None , force_download: bool = False , resume_download: bool = False , proxies: Optional[Dict[str, str]] = None , use_auth_token: Optional[Union[bool, str]] = None , revision: Optional[str] = None , local_files_only: bool = False , **kwargs , ) -> Dict:
    """simple docstring"""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path , IMAGE_PROCESSOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    if resolved_config_file is None:
        logger.info(
            """Could not locate the image processor configuration file, will try to use the model config instead.""" )
        return {}
    with open(resolved_config_file , encoding="""utf-8""" ) as reader:
        return json.load(reader )
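# Illustration: get_image_processor_config("google/vit-base-patch16-224") would return
# the parsed preprocessor_config.json of that repo as a dict (the model id is only an
# example), and {} when the repo carries no image processor config.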
class AutoImageProcessor :
def __init__( self : Any ):
raise EnvironmentError(
"""AutoImageProcessor is designed to be instantiated """
"""using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.""" )
@classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES )
    def from_pretrained ( cls , pretrained_model_name_or_path , **kwargs ):
        config = kwargs.pop("""config""" , None )
        trust_remote_code = kwargs.pop("""trust_remote_code""" , None )
        kwargs["""_from_auto"""] = True
        config_dict , _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path , **kwargs )
        image_processor_class = config_dict.get("""image_processor_type""" , None )
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("""auto_map""" , {} ):
            image_processor_auto_map = config_dict["""auto_map"""]["""AutoImageProcessor"""]
        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("""feature_extractor_type""" , None )
            if feature_extractor_class is not None:
                logger.warning(
                    """Could not find image processor class in the image processor config or the model config. Loading"""
                    """ based on pattern matching with the model's feature extractor configuration.""" )
                image_processor_class = feature_extractor_class.replace("""FeatureExtractor""" , """ImageProcessor""" )
            if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ):
                feature_extractor_auto_map = config_dict["""auto_map"""]["""AutoFeatureExtractor"""]
                image_processor_auto_map = feature_extractor_auto_map.replace("""FeatureExtractor""" , """ImageProcessor""" )
                logger.warning(
                    """Could not find image processor auto map in the image processor config or the model config."""
                    """ Loading based on pattern matching with the model's feature extractor configuration.""" )
        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config , PretrainedConfig ):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path , **kwargs )
            # It could be in `config.image_processor_type`
            image_processor_class = getattr(config , """image_processor_type""" , None )
            if hasattr(config , """auto_map""" ) and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["""AutoImageProcessor"""]
        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class )
        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config ) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code , pretrained_model_name_or_path , has_local_code , has_remote_code )
        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map , pretrained_model_name_or_path , **kwargs )
            _ = kwargs.pop("""code_revision""" , None )
            if os.path.isdir(pretrained_model_name_or_path ):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict , **kwargs )
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict , **kwargs )
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config ) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config )]
            return image_processor_class.from_dict(config_dict , **kwargs )
        raise ValueError(
            f"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
            f"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
            f"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
    def register ( config_class , image_processor_class ):
        IMAGE_PROCESSOR_MAPPING.register(config_class , image_processor_class )
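# Hedged usage sketch (standard transformers API; the model id is only an example,
# and MyConfig/MyImageProcessor are hypothetical placeholders):
#   from transformers import AutoImageProcessor
#   image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   AutoImageProcessor.register(MyConfig, MyImageProcessor)  # wire up a custom pair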
"""simple docstring"""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = '''scheduler_config.json'''
class FlaxKarrasDiffusionSchedulers ( Enum ):
"""simple docstring"""
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5
@dataclass
class FlaxSchedulerOutput ( BaseOutput ):
"""simple docstring"""
    prev_sample: jnp.ndarray
class FlaxSchedulerMixin :
"""simple docstring"""
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True
@classmethod
    def from_pretrained ( cls ,pretrained_model_name_or_path: Dict[str, Any] = None ,subfolder: Optional[str] = None ,return_unused_kwargs=False ,**kwargs ,):
        config , kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path ,subfolder=subfolder ,return_unused_kwargs=True ,**kwargs ,)
        scheduler , unused_kwargs = cls.from_config(config ,return_unused_kwargs=True ,**kwargs )
        if hasattr(scheduler ,'''create_state''' ) and getattr(scheduler ,'''has_state''' ,False ):
            state = scheduler.create_state()
        if return_unused_kwargs:
            return scheduler, state, unused_kwargs
        return scheduler, state
    def save_pretrained ( self ,save_directory: Union[str, os.PathLike] ,push_to_hub: bool = False ,**kwargs ):
        self.save_config(save_directory=save_directory ,push_to_hub=push_to_hub ,**kwargs )
@property
    def compatibles ( self ):
        return self._get_compatibles()
@classmethod
    def _get_compatibles ( cls ):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles ) )
        diffusers_library = importlib.import_module(__name__.split('''.''' )[0] )
        compatible_classes = [
            getattr(diffusers_library ,c ) for c in compatible_classes_str if hasattr(diffusers_library ,c )
        ]
        return compatible_classes
def broadcast_to_shape_from_left ( x , shape ):
    assert len(shape ) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape ) - x.ndim) ) , shape )
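# e.g. a per-sample coefficient of shape (B,) broadcast against samples of shape
# (B, H, W, C) is first reshaped to (B, 1, 1, 1) and then expanded by jnp.broadcast_to.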
def betas_for_alpha_bar ( num_diffusion_timesteps , max_beta=0.999 , dtype=jnp.float32 ):
    def alpha_bar(time_step ):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2 ) / alpha_bar(t1 ) , max_beta ) )
    return jnp.array(betas , dtype=dtype )
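# The loop discretizes the Glide cosine schedule: beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i),
# clipped at max_beta so the last few denoising steps stay numerically stable.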
@flax.struct.dataclass
class CommonSchedulerState :
"""simple docstring"""
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray
@classmethod
    def create ( cls ,scheduler ):
        config = scheduler.config
        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas ,dtype=scheduler.dtype )
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start ,config.beta_end ,config.num_train_timesteps ,dtype=scheduler.dtype )
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5 ,config.beta_end**0.5 ,config.num_train_timesteps ,dtype=scheduler.dtype )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps ,dtype=scheduler.dtype )
        else:
            raise NotImplementedError(
                f'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}' )
        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas ,axis=0 )
        return cls(
            alphas=alphas ,betas=betas ,alphas_cumprod=alphas_cumprod ,)
def get_sqrt_alpha_prod ( state , original_samples , noise , timesteps ):
    alphas_cumprod = state.alphas_cumprod
    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod , original_samples.shape )
    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod , original_samples.shape )
    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common ( state , original_samples , noise , timesteps ):
    sqrt_alpha_prod , sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , original_samples , noise , timesteps )
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def get_velocity_common ( state , sample , noise , timesteps ):
    sqrt_alpha_prod , sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , sample , noise , timesteps )
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
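# The two helpers above implement the forward-diffusion identities
#   noisy_sample = sqrt(alphas_cumprod[t]) * original + sqrt(1 - alphas_cumprod[t]) * noise
#   velocity     = sqrt(alphas_cumprod[t]) * noise    - sqrt(1 - alphas_cumprod[t]) * sample
# with both coefficients broadcast to the sample shape.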
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str , main_target: str ) -> tuple[str, float]:
    score = len([g for position, g in enumerate(item ) if g == main_target[position]] )
    return (item, float(score ))
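# e.g. evaluate("Helxo Worlb", "Hello World") -> ("Helxo Worlb", 9.0): the fitness is
# simply the number of characters already in the right position.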
def crossover(parent_1: str , parent_2: str ) -> tuple[str, str]:
    random_slice = random.randint(0 , len(parent_1 ) - 1 )
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
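# e.g. with random_slice == 2, crossover("aaaaa", "bbbbb") returns ("aabbb", "bbaaa"):
# each child takes its head from one parent and its tail from the other.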
def mutate(child: str , genes: list[str] ) -> str:
    child_list = list(child )
    if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
        child_list[random.randint(0 , len(child ) - 1 )] = random.choice(genes )
    return "".join(child_list )
def select(parent_1: tuple[str, float] , population_score: list[tuple[str, float]] , genes: list[str] , ) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100 ) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n ):
        parent_2 = population_score[random.randint(0 , N_SELECTED )][0]
        child_1 , child_2 = crossover(parent_1[0] , parent_2 )
        # Append new string to the population list.
        pop.append(mutate(child_1 , genes ) )
        pop.append(mutate(child_2 , genes ) )
    return pop
def basic(target: str , genes: list[str] , debug: bool = True ) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f'{N_POPULATION} must be bigger than {N_SELECTED}'
        raise ValueError(msg )
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes} )
    if not_in_genes_list:
        msg = f'{not_in_genes_list} is not in genes list, evolution cannot converge'
        raise ValueError(msg )
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION ):
        population.append(''.join([random.choice(genes ) for i in range(len(target ) )] ) )
    # Just some logs to know what the algorithm is doing.
    generation , total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population )
        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item , target ) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score , key=lambda x : x[1] , reverse=True )
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f'\nGeneration: {generation}'
                f'\nTotal Population:{total_population}'
                f'\nBest score: {population_score[0][1]}'
                f'\nBest string: {population_score[0][0]}' )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3 )]
        population.clear()
        population.extend(population_best )
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target )) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED ):
            population.extend(select(population_score[int(i )] , population_score , genes ) )
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings
            # in far fewer generations.
            if len(population ) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
    genes_list = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
    generation , population , target = basic(target_str, genes_list)
print(
F'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
def binomial_coefficient( n: int , r: int ):
    c = [0 for i in range(r + 1 )]
    # nC0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
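# Sanity check: the call above prints 252, i.e. C(10, 5). The inner loop applies
# Pascal's rule C(i, j) = C(i - 1, j) + C(i - 1, j - 1) in place, right to left,
# so a single row of size r + 1 suffices.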
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_28,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 50,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 10,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 10,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class UpperCAmelCase ( unittest.TestCase ):
@classmethod
def _SCREAMING_SNAKE_CASE (cls : Dict ) -> Optional[int]:
'''simple docstring'''
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
def _SCREAMING_SNAKE_CASE (cls : Dict ) -> Union[str, Any]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config" )
except HTTPError:
pass
def _SCREAMING_SNAKE_CASE (self : str ) -> List[str]:
'''simple docstring'''
        config = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("test-config" , use_auth_token=self._token )
        new_config = BertConfig.from_pretrained(f"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir , repo_id="test-config" , push_to_hub=True , use_auth_token=self._token )
        new_config = BertConfig.from_pretrained(f"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Dict:
'''simple docstring'''
        config = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
        new_config = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir , repo_id="valid_org/test-config-org" , push_to_hub=True , use_auth_token=self._token )
        new_config = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Dict:
'''simple docstring'''
CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42 )
config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
        new_config = AutoConfig.from_pretrained(f"""{USER}/test-dynamic-config""" , trust_remote_code=True )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
self.assertEqual(new_config.attribute , 42 )
class UpperCAmelCase ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Dict:
'''simple docstring'''
        c = GPT2Config()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
        self.assertEqual(n_embd , c.n_embd , "mismatch for key: n_embd" )
        self.assertEqual(resid_pdrop , c.resid_pdrop , "mismatch for key: resid_pdrop" )
        self.assertEqual(scale_attn_weights , c.scale_attn_weights , "mismatch for key: scale_attn_weights" )
        self.assertEqual(summary_type , c.summary_type , "mismatch for key: summary_type" )
def _SCREAMING_SNAKE_CASE (self : int ) -> List[str]:
'''simple docstring'''
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config , key )]
        if len(keys_with_defaults ) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f""" {', '.join(keys_with_defaults )}.""" )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
with self.assertRaises(snake_case__ ):
# config is in subfolder, the following should not work without specifying the subfolder
            config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
        self.assertIsNotNone(config )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Optional[Any]:
'''simple docstring'''
        response_mock = mock.Mock()
        response_mock.status_code = 5_00
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request" , return_value=response_mock ) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
            # This checks that we did call the fake head request
            mock_head.assert_called()
def _SCREAMING_SNAKE_CASE (self : Any ) -> List[Any]:
'''simple docstring'''
snake_case : Dict = BertConfig.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
def _SCREAMING_SNAKE_CASE (self : int ) -> str:
'''simple docstring'''
        configuration = AutoConfig.from_pretrained("bert-base-cased" )
        configuration.configuration_files = ["config.4.0.0.json"]
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir )
            configuration.hidden_size = 2
            json.dump(configuration.to_dict() , open(os.path.join(tmp_dir , "config.4.0.0.json" ) , "w" ) )
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir )
            self.assertEqual(new_configuration.hidden_size , 2 )
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 7_68
            configuration.save_pretrained(tmp_dir )
            shutil.move(os.path.join(tmp_dir , "config.4.0.0.json" ) , os.path.join(tmp_dir , "config.42.0.0.json" ) )
            new_configuration = AutoConfig.from_pretrained(tmp_dir )
self.assertEqual(new_configuration.hidden_size , 7_68 )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Tuple:
'''simple docstring'''
snake_case : List[Any] = "hf-internal-testing/test-two-configs"
import transformers as new_transformers
snake_case : Optional[int] = "v4.0.0"
snake_case , snake_case : List[str] = new_transformers.models.auto.AutoConfig.from_pretrained(
snake_case__ , return_unused_kwargs=snake_case__ )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(snake_case__ , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
snake_case : int = "v3.0.0"
snake_case : int = old_transformers.models.auto.AutoConfig.from_pretrained(snake_case__ )
self.assertEqual(old_configuration.hidden_size , 7_68 )
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader( path: str ):
    '''simple docstring'''
    with open(path, "rb" ) as f:
        im = Image.open(f )
        return im.convert("RGB" )
@dataclass
class DataTrainingArguments :
"""simple docstring"""
    dataset_name: Optional[str] = field(
        default=None , metadata={
            """help""": """Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."""
        } , )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
    train_dir: Optional[str] = field(default=None , metadata={"""help""": """A folder containing the training data."""} )
    validation_dir: Optional[str] = field(default=None , metadata={"""help""": """A folder containing the validation data."""} )
    train_val_split: Optional[float] = field(
        default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of evaluation examples to this """
                """value if set."""
            )
        } , )
    def __post_init__(self ):
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
"You must specify either a dataset name from the hub or a train and/or validation directory." )
@dataclass
class ModelArguments :
"""simple docstring"""
    model_name_or_path: str = field(
        default="""google/vit-base-patch16-224-in21k""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , )
    model_type: Optional[str] = field(
        default=None , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(MODEL_TYPES )} , )
    config_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
    model_revision: str = field(
        default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
    image_processor_name: str = field(default=None , metadata={"""help""": """Name or path of preprocessor config."""} )
    use_auth_token: bool = field(
        default=False , metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        } , )
    ignore_mismatched_sizes: bool = field(
        default=False , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def collate_fn( examples ):
    '''simple docstring'''
    pixel_values = torch.stack([example["pixel_values"] for example in examples] )
    labels = torch.tensor([example["labels"] for example in examples] )
    return {"pixel_values": pixel_values, "labels": labels}
def main( ):
'''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout )], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
        dataset = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, task="image-classification", use_auth_token=True if model_args.use_auth_token else None, )
else:
        data_files = {}
if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**" )
if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**" )
        dataset = load_dataset(
            "imagefolder", data_files=data_files, cache_dir=model_args.cache_dir, task="image-classification", )
# If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float ) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split )
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id , id2label = {}, {}
    for i, label in enumerate(labels ):
        label2id[label] = str(i )
        id2label[str(i )] = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p ):
return metric.compute(predictions=np.argmax(p.predictions, axis=1 ), references=p.label_ids )
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path, num_labels=len(labels ), label2id=label2id, id2label=id2label, finetuning_task="image-classification", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path ), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std )
    _train_transforms = Compose(
        [
            RandomResizedCrop(size ),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ] )
    _val_transforms = Compose(
        [
            Resize(size ),
            CenterCrop(size ),
            ToTensor(),
            normalize,
        ] )
    def train_transforms(example_batch ):
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]
        ]
        return example_batch
    def val_transforms(example_batch ):
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]]
        return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
# Set the training transforms
        dataset["train"].set_transform(train_transforms )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
# Set the validation transforms
        dataset["validation"].set_transform(val_transforms )
    # Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=dataset["train"] if training_args.do_train else None, eval_dataset=dataset["validation"] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=image_processor, data_collator=collate_fn, )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics("train", train_result.metrics )
trainer.save_metrics("train", train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics )
        trainer.save_metrics("eval", metrics )
# Write model card and (optionally) push to hub
    kwargs = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "image-classification",
"dataset": data_args.dataset_name,
"tags": ["image-classification", "vision"],
}
if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
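# Hedged example invocation (dataset name and paths are illustrative only):
#   python run_image_classification.py \
#       --dataset_name beans \
#       --output_dir ./vit-beans \
#       --remove_unused_columns False \
#       --do_train --do_eval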
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data( product: str = "laptop" ) -> DataFrame:
'''simple docstring'''
    url = f"""https://www.amazon.in/laptop/s?k={product}"""
    header = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36',
        'Accept-Language': 'en-US, en;q=0.5',
    }
    soup = BeautifulSoup(requests.get(url , headers=header ).text )
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
columns=[
'Product Title',
'Product Link',
'Current Price of the product',
'Product Rating',
'MRP of the product',
'Discount',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ):
try:
            product_title = item.h2.text
            product_link = 'https://www.amazon.in/' + item.h2.a['href']
            product_price = item.find('span' , attrs={'class': 'a-offscreen'} ).text
            try:
                product_rating = item.find('span' , attrs={'class': 'a-icon-alt'} ).text
            except AttributeError:
                product_rating = 'Not available'
            try:
                product_mrp = (
                    '₹'
                    + item.find(
                        'span' , attrs={'class': 'a-price a-text-price'} ).text.split('₹' )[1]
                )
            except AttributeError:
                product_mrp = ''
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip('₹' ).replace(',' , '' ) )
                            - float(product_price.strip('₹' ).replace(',' , '' ) )
                        )
                        / float(product_mrp.strip('₹' ).replace(',' , '' ) )
                    )
                    * 1_00 )
            except ValueError:
                discount = float('nan' )
except AttributeError:
pass
lowerCamelCase_ = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
lowerCamelCase_ = ' '
lowerCamelCase_ = ' '
data_frame.index += 1
return data_frame
if __name__ == "__main__":
    product = "headphones"
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_informer'''] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
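# With the lazy structure above, `from transformers.models.informer import InformerModel`
# only imports modeling_informer the first time the attribute is touched (a sketch of the
# standard transformers _LazyModule pattern; it assumes torch is available).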
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup( params , i , prefix ):
    """simple docstring"""
    return params[f"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :]
def tax_attention_lookup( params , i , prefix , layer_name="attention" ):
    """simple docstring"""
    k_tmp = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :] )
    k = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
    o_tmp = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :] )
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
    q_tmp = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :] )
    q = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
    v_tmp = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :] )
    v = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
    return k, o, q, v
def tax_mlp_lookup( params , i , prefix , split_mlp_wi=False ):
    """simple docstring"""
    if split_mlp_wi:
        wi_0 = params[f"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :]
        wi_1 = params[f"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :]
    wo = params[f"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :]
    return wi, wo
def tax_layer_norm_lookup( params , i , prefix , layer_name ):
    """simple docstring"""
    return params[f"""{prefix}/{prefix}/{layer_name}/scale"""][:, i]
def convert_tax_to_pytorch( variables: dict , * , num_layers: int , is_encoder_only: bool , scalable_attention: bool = False ):
    """simple docstring"""
    old = traverse_util.flatten_dict(variables["""target"""] )
    old = {"""/""".join(k ): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = """encoder/encoder/mlp/wi_0/kernel""" in old
    print("""Split MLP:""" , split_mlp_wi )
    new = collections.OrderedDict()
    # Shared embeddings.
    new["""shared.weight"""] = old["""token_embedder/embedding"""]
    # Encoder.
    for i in range(num_layers ):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old , i , """encoder""" , """pre_attention_layer_norm""" )
        k , o , q , v = tax_attention_lookup(old , i , """encoder""" , """attention""" )
        new[f"""encoder.block.{i}.layer.0.layer_norm.weight"""] = layer_norm
        new[f"""encoder.block.{i}.layer.0.SelfAttention.k.weight"""] = k.T
        new[f"""encoder.block.{i}.layer.0.SelfAttention.o.weight"""] = o.T
        new[f"""encoder.block.{i}.layer.0.SelfAttention.q.weight"""] = q.T
        new[f"""encoder.block.{i}.layer.0.SelfAttention.v.weight"""] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old , i , """encoder""" , """pre_mlp_layer_norm""" )
        wi , wo = tax_mlp_lookup(old , i , """encoder""" , split_mlp_wi )
        new[f"""encoder.block.{i}.layer.1.layer_norm.weight"""] = layer_norm
        if split_mlp_wi:
            new[f"""encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"""] = wi[0].T
            new[f"""encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"""] = wi[1].T
        else:
            new[f"""encoder.block.{i}.layer.1.DenseReluDense.wi.weight"""] = wi.T
        new[f"""encoder.block.{i}.layer.1.DenseReluDense.wo.weight"""] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"""encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"""] = tax_relpos_bias_lookup(
                old , i , """encoder""" ).T
    new["""encoder.final_layer_norm.weight"""] = old["""encoder/encoder_norm/scale"""]
    if not scalable_attention:
        new["""encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"""] = tax_relpos_bias_lookup(
            old , 0 , """encoder""" ).T
        new["""decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"""] = tax_relpos_bias_lookup(
            old , 0 , """decoder""" ).T
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers ):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old , i , """decoder""" , """pre_self_attention_layer_norm""" )
            k , o , q , v = tax_attention_lookup(old , i , """decoder""" , """self_attention""" )
            new[f"""decoder.block.{i}.layer.0.layer_norm.weight"""] = layer_norm
            new[f"""decoder.block.{i}.layer.0.SelfAttention.k.weight"""] = k.T
            new[f"""decoder.block.{i}.layer.0.SelfAttention.o.weight"""] = o.T
            new[f"""decoder.block.{i}.layer.0.SelfAttention.q.weight"""] = q.T
            new[f"""decoder.block.{i}.layer.0.SelfAttention.v.weight"""] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old , i , """decoder""" , """pre_cross_attention_layer_norm""" )
            k , o , q , v = tax_attention_lookup(old , i , """decoder""" , """encoder_decoder_attention""" )
            new[f"""decoder.block.{i}.layer.1.layer_norm.weight"""] = layer_norm
            new[f"""decoder.block.{i}.layer.1.EncDecAttention.k.weight"""] = k.T
            new[f"""decoder.block.{i}.layer.1.EncDecAttention.o.weight"""] = o.T
            new[f"""decoder.block.{i}.layer.1.EncDecAttention.q.weight"""] = q.T
            new[f"""decoder.block.{i}.layer.1.EncDecAttention.v.weight"""] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old , i , """decoder""" , """pre_mlp_layer_norm""" )
            wi , wo = tax_mlp_lookup(old , i , """decoder""" , split_mlp_wi )
            new[f"""decoder.block.{i}.layer.2.layer_norm.weight"""] = layer_norm
            if split_mlp_wi:
                new[f"""decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"""] = wi[0].T
                new[f"""decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"""] = wi[1].T
            else:
                new[f"""decoder.block.{i}.layer.2.DenseReluDense.wi.weight"""] = wi.T
            new[f"""decoder.block.{i}.layer.2.DenseReluDense.wo.weight"""] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"""decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"""] = tax_relpos_bias_lookup(old , i , """decoder""" ).T
        new["""decoder.final_layer_norm.weight"""] = old["""decoder/decoder_norm/scale"""]
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["""lm_head.weight"""] = old["""decoder/logits_dense/kernel"""].T
    return new
def make_state_dict( converted_params , is_encoder_only: bool ):
    """simple docstring"""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["""encoder.embed_tokens.weight"""] = state_dict["""shared.weight"""]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["""decoder.embed_tokens.weight"""] = state_dict["""shared.weight"""]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("""Using shared word embeddings as lm_head.""" )
            state_dict["""lm_head.weight"""] = state_dict["""shared.weight"""]
    return state_dict
def load_tax_weights_in_ta( model , config , tax_checkpoint_path , is_encoder_only , scalable_attention ):
    """simple docstring"""
    variables = checkpoints.load_t5x_checkpoint(tax_checkpoint_path )
    converted_params = convert_tax_to_pytorch(
        variables , num_layers=config.num_layers , is_encoder_only=is_encoder_only , scalable_attention=scalable_attention )
    state_dict = make_state_dict(converted_params , is_encoder_only )
    model.load_state_dict(state_dict , strict=True )
def convert_tax_checkpoint_to_pytorch( tax_checkpoint_path , config_file , pytorch_dump_path , is_encoder_only: bool = False , scalable_attention: bool = False , ):
    """simple docstring"""
    config = MT5Config.from_json_file(config_file )
    print(f"""Building PyTorch model from configuration: {config}""" )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config )
    else:
        model = UMT5ForConditionalGeneration(config )
    # Load weights from the T5X checkpoint
    load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only , scalable_attention )
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path )
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print("""Done""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
    args = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
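# Hedged example invocation (the script name and all paths are illustrative only):
#   python convert_umt5_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path ./umt5-pytorch \
#       --scalable_attention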