| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
'''Re-export the masked BERT configuration, task-specific model heads, and masking modules.'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 1
|
'''Project Euler problem 99: each line of ``base_exp.txt`` holds a ``base,exponent``
pair. Comparing the powers directly would mean evaluating huge numbers, so we
compare ``exponent * log10(base)`` instead.'''
import os
from math import log10


def solution(base_exp: str = "base_exp.txt") -> int:
    '''Return the 1-indexed line number of the pair with the greatest numerical value.'''
    largest = 0.0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), base_exp))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
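# Worked example: for a two-line file "2,7" then "3,5", solution() returns 2,
# since 5 * log10(3) ≈ 2.386 beats 7 * log10(2) ≈ 2.107 (3**5 = 243 > 2**7 = 128).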
if __name__ == "__main__":
print(solution())
| 1
| 1
|
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List

import fsspec
import fsspec.asyn

from . import compression
from .hffilesystem import HfFileSystem

_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the filesystem protocol prefix (e.g. "s3://") from a dataset path."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Return True for any filesystem other than the local "file" protocol."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str) -> None:
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Clear references to fsspec's async loop and thread so they can be re-created."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
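# Illustrative usage of the helpers above:
#   extract_path_from_uri("s3://my-bucket/my-dataset") returns "my-bucket/my-dataset"
#   is_remote_filesystem returns False for the local "file" protocol and True otherwise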
| 230
|
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
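# AlbertModelTester deliberately uses a tiny configuration (vocab 99, hidden size 36,
# 6 layers) so that every task head above can be exercised quickly on CPU.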
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for the ForPreTraining model, which needs both MLM and sentence-order labels
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 230
| 1
|
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of `function` in the interval [a, b] by repeated halving."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid
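# Example: bisection(f, 1, 1000) for f(x) = x**3 - 2*x - 5 (defined below)
# converges to the single real root of the cubic, approximately 2.0945515.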
def f(x: float) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
| 26
|
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def rename_keys(state_dict):
    """Remap original GLPN checkpoint keys to the Hugging Face naming scheme."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict
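# For instance, the original key "module.encoder.patch_embed1.proj.weight"
# ends up as "glpn.encoder.patch_embeddings.0.proj.weight" after the renames above.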
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
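# The fused kv projection stacks key rows on top of value rows: the first
# config.hidden_sizes[i] rows become the key projection, the rest the value projection.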
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 50
| 0
|
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    """Load the original metaseq checkpoint and remap its keys to the HF OPT layout."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
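# Illustrative: a fused key such as "decoder.layers.0.self_attn.qkv_proj.weight" (hypothetical
# name) is replaced by three q_proj/k_proj/v_proj keys, each holding one third of the rows.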
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 368
|
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1_000, 1_000) for i in range(10)]
    r = randint(-5_000, 5_000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Brute force: try every 3-permutation of the array."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Sort once, then scan with two pointers for each fixed first element."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
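# The benchmark below times both strategies on the same random dataset: the
# permutation-based triplet_sum1 grows as O(n^3), the two-pointer triplet_sum2 as O(n^2).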
def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10_000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10_000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
| 285
| 0
|
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float):
    """Given two of voltage, current and power (the unknown one passed as 0),
    compute the missing quantity via P = V * I."""
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
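# Example: electric_power(voltage=0, current=2, power=5) returns
# result(name='voltage', value=2.5), since V = P / I.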
if __name__ == "__main__":
import doctest
doctest.testmod()
| 186
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class Pix2StructTextConfig(PretrainedConfig):
    """Configuration for the text (decoder) part of a Pix2Struct model."""

    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructVisionConfig(PretrainedConfig):
    """Configuration for the vision (encoder) part of a Pix2Struct model."""

    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructConfig(PretrainedConfig):
    """Composite configuration holding a text and a vision sub-configuration."""

    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 186
| 1
|
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 364
|
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
| 110
| 0
|
'''Recursive Tower of Hanoi solver.'''


def move_tower(height, from_pole, to_pole, with_pole):
    '''Move `height` disks from `from_pole` to `to_pole`, using `with_pole` as the spare.'''
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp, tp):
    print('moving disk from', fp, 'to', tp)


def main():
    height = int(input('Height of hanoi: ').strip())
    move_tower(height, 'A', 'B', 'C')


if __name__ == "__main__":
    main()
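# Moving a tower of height n this way always takes 2**n - 1 disk moves.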
| 56
|
"""simple docstring"""
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    '''Return the longest non-decreasing subsequence of `array`.'''
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
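# Example (traced by hand): longest_subsequence([1, 3, 2, 4]) returns [1, 2, 4].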
if __name__ == "__main__":
import doctest
doctest.testmod()
| 155
| 0
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )

        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)

        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            projection_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5002,
        )
        text_encoder = CLIPTextModel(text_encoder_config)

        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")

        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)

        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 122
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
| 122
| 1
|
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Compute the Jaccard similarity |A ∩ B| / |A ∪ B| for sets, lists or tuples."""
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None
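# With the sets below, the intersection is {'c', 'd', 'e'} (3 elements) and the
# union has 8 elements, so jaccard_similarity(set_a, set_b) prints 0.375.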
if __name__ == "__main__":
A__ = {'''a''', '''b''', '''c''', '''d''', '''e'''}
A__ = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
print(jaccard_similarity(set_a, set_b))
| 230
|
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"


class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()
@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager(monkeypatch, urls_type, tmp_path):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]:
"""simple docstring"""
assert path.endswith('''.jsonl''' )
for num_items, line in enumerate(__lowerCAmelCase , start=1 ):
snake_case__ : Any = json.loads(line.decode('''utf-8''' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('''archive_jsonl''' , ['''tar_jsonl_path''', '''zip_jsonl_path'''] )
def _lowerCAmelCase ( archive_jsonl , request ) -> Union[str, Any]:
    """simple docstring"""
    snake_case__ : Any = request.getfixturevalue(archive_jsonl )
    snake_case__ : Union[str, Any] = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path ) , start=1 ):
        _test_jsonl(path , file )
assert num_jsonl == 2
@pytest.mark.parametrize('''archive_nested_jsonl''' , ['''tar_nested_jsonl_path''', '''zip_nested_jsonl_path'''] )
def _lowerCAmelCase ( archive_nested_jsonl , request ) -> List[str]:
    """simple docstring"""
    snake_case__ : Union[str, Any] = request.getfixturevalue(archive_nested_jsonl )
    snake_case__ : Optional[int] = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path ) , start=1 ):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file ) , start=1 ):
            _test_jsonl(subpath , subfile )
assert num_tar == 1
assert num_jsonl == 2
def _lowerCAmelCase ( data_dir_with_hidden_files ) -> str:
"""simple docstring"""
snake_case__ : Any = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files ) , start=1 ):
        assert os.path.basename(file ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
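# Added illustration (not part of the test module above): a minimal, self-contained
# sketch of DownloadManager.iter_archive, the API exercised by these tests. It builds
# a tiny tar archive locally, then streams (member_path, file_object) pairs from it
# without extracting to disk first. The helper name is made up for this sketch.
import io
import tarfile
import tempfile

def _demo_iter_archive():
    archive_path = os.path.join(tempfile.mkdtemp(), "demo.tar")
    with tarfile.open(archive_path, "w") as tar:
        for name, payload in [("a.txt", b"hello"), ("b.txt", b"world")]:
            info = tarfile.TarInfo(name=name)
            info.size = len(payload)
            tar.addfile(info, io.BytesIO(payload))
    dl_manager = DownloadManager()
    for member_path, file_obj in dl_manager.iter_archive(archive_path):
        print(member_path, file_obj.read())  # a.txt b'hello', then b.txt b'world'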
| 230
| 1
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
a_ : List[str] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
@dataclass
class __UpperCamelCase :
lowercase : Optional[str] =field(
default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
lowercase : Optional[str] =field(
default=lowerCamelCase__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
lowercase : Optional[str] =field(
default=lowerCamelCase__ , metadata={'help': 'The column name of the images in the files.'} )
lowercase : Optional[str] =field(default=lowerCamelCase__ , metadata={'help': 'A folder containing the training data.'} )
lowercase : Optional[str] =field(default=lowerCamelCase__ , metadata={'help': 'A folder containing the validation data.'} )
lowercase : Optional[float] =field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
lowercase : Optional[int] =field(
default=lowerCamelCase__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
lowercase : Optional[int] =field(
default=lowerCamelCase__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ={}
if self.train_dir is not None:
lowerCamelCase_ =self.train_dir
if self.validation_dir is not None:
lowerCamelCase_ =self.validation_dir
lowerCamelCase_ =data_files if data_files else None
@dataclass
class __UpperCamelCase :
lowercase : str =field(
default=lowerCamelCase__ , metadata={
'help': (
'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'
)
} , )
lowercase : Optional[str] =field(
default=lowerCamelCase__ , metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'} )
lowercase : Optional[str] =field(
default=lowerCamelCase__ , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
lowercase : Optional[str] =field(
default=lowerCamelCase__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
lowercase : str =field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
lowercase : str =field(default=lowerCamelCase__ , metadata={'help': 'Name or path of preprocessor config.'} )
lowercase : bool =field(
default=lowerCamelCase__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
lowercase : float =field(
default=0.75 , metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'} )
lowercase : bool =field(
default=lowerCamelCase__ , metadata={'help': 'Whether or not to train with normalized pixel values as target.'} )
@dataclass
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : float =field(
default=1E-3 , metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'} )
def a_ ( examples : List[str] ) -> int:
"""simple docstring"""
lowerCamelCase_ =torch.stack([example['''pixel_values'''] for example in examples] )
return {"pixel_values": pixel_values}
def a_ ( ) -> str:
"""simple docstring"""
lowerCamelCase_ =HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('''run_mae''' , model_args , data_args )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCamelCase_ =training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
lowerCamelCase_ =None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset.
lowerCamelCase_ =load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
lowerCamelCase_ =None if '''validation''' in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
lowerCamelCase_ =ds['''train'''].train_test_split(data_args.train_val_split )
lowerCamelCase_ =split['''train''']
lowerCamelCase_ =split['''test''']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ ={
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name:
        lowerCamelCase_ =ViTMAEConfig.from_pretrained(model_args.config_name , **config_kwargs )
elif model_args.model_name_or_path:
        lowerCamelCase_ =ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
else:
lowerCamelCase_ =ViTMAEConfig()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
# adapt config
config.update(
{
'''mask_ratio''': model_args.mask_ratio,
'''norm_pix_loss''': model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
        lowerCamelCase_ =ViTImageProcessor.from_pretrained(model_args.image_processor_name , **config_kwargs )
elif model_args.model_name_or_path:
        lowerCamelCase_ =ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **config_kwargs )
else:
lowerCamelCase_ =ViTImageProcessor()
# create model
if model_args.model_name_or_path:
lowerCamelCase_ =ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('''Training new model from scratch''' )
        lowerCamelCase_ =ViTMAEForPreTraining(config )
if training_args.do_train:
lowerCamelCase_ =ds['''train'''].column_names
else:
lowerCamelCase_ =ds['''validation'''].column_names
if data_args.image_column_name is not None:
lowerCamelCase_ =data_args.image_column_name
elif "image" in column_names:
lowerCamelCase_ ='''image'''
elif "img" in column_names:
lowerCamelCase_ ='''img'''
else:
lowerCamelCase_ =column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
lowerCamelCase_ =image_processor.size['''shortest_edge''']
else:
lowerCamelCase_ =(image_processor.size['''height'''], image_processor.size['''width'''])
lowerCamelCase_ =Compose(
[
            Lambda(lambda img : img.convert('''RGB''' ) if img.mode != "RGB" else img ),
            RandomResizedCrop(size , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
    def preprocess_images(examples ):
        # Apply the train-time transforms and store the result on the batch.
        examples['''pixel_values'''] = [transforms(image ) for image in examples[image_column_name]]
        return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
lowerCamelCase_ =ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(__snake_case )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
lowerCamelCase_ =(
ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(__snake_case )
# Compute absolute learning rate
lowerCamelCase_ =(
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
lowerCamelCase_ =training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
lowerCamelCase_ =Trainer(
        model=model , args=training_args , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=image_processor , data_collator=collate_fn , )
# Training
if training_args.do_train:
lowerCamelCase_ =None
if training_args.resume_from_checkpoint is not None:
lowerCamelCase_ =training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCamelCase_ =last_checkpoint
        lowerCamelCase_ =trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowerCamelCase_ =trainer.evaluate()
        trainer.log_metrics('''eval''' , metrics )
        trainer.save_metrics('''eval''' , metrics )
# Write model card and (optionally) push to hub
lowerCamelCase_ ={
'''tasks''': '''masked-auto-encoding''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''masked-auto-encoding'''],
}
if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
else:
        trainer.create_model_card(**kwargs )
def a_ ( __snake_case : Optional[int] ) -> List[Any]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
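# Added illustration: the linear learning-rate scaling rule applied in main() above
# (absolute_lr = base_learning_rate * total_train_batch_size / 256), factored out so
# the arithmetic is easy to verify. The numbers in the assert are made-up examples.
def absolute_learning_rate(base_lr, per_device_batch_size, grad_accum_steps, world_size):
    total_batch_size = per_device_batch_size * grad_accum_steps * world_size
    return base_lr * total_batch_size / 256

# an effective batch of 512 * 1 * 8 = 4096 scales the base rate by 16x
assert abs(absolute_learning_rate(1.5e-4, 512, 1, 8) - 2.4e-3) < 1e-12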
| 367
|
'''simple docstring'''
import os
from typing import Dict, List, Tuple, TypeVar, Union
a_ : Tuple = TypeVar("""T""")
a_ : Dict = Union[List[T], Tuple[T, ...]]
a_ : int = Union[T, List[T], Dict[str, T]]
a_ : Optional[Any] = Union[str, bytes, os.PathLike]
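# Added illustration: a typical use of the third alias above (NestedDataStructureLike
# in the upstream `datasets` source) to annotate a helper that accepts a single item,
# a list/tuple of items, or a dict of items. The helper itself is a hypothetical sketch.
def _to_list(data: Union[str, List[str], Dict[str, str]]) -> List[str]:
    if isinstance(data, dict):
        return list(data.values())
    if isinstance(data, (list, tuple)):
        return list(data)
    return [data]

assert _to_list("a") == ["a"]
assert _to_list(["a", "b"]) == ["a", "b"]
assert _to_list({"train": "a"}) == ["a"]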
| 6
| 0
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = "dpt"
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=384 , patch_size=16 , num_channels=3 , is_hybrid=False , qkv_bias=True , backbone_out_indices=[2, 5, 8, 11] , readout_type="project" , reassemble_factors=[4, 2, 1, 0.5] , neck_hidden_sizes=[96, 192, 384, 768] , fusion_hidden_size=256 , head_in_index=-1 , use_batch_norm_in_fusion_residual=False , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , semantic_loss_ignore_index=255 , semantic_classifier_dropout=0.1 , backbone_featmap_shape=[1, 1024, 24, 24] , neck_ignore_stages=[0, 1] , backbone_config=None , **kwargs , ) -> Optional[Any]:
        super().__init__(**kwargs)
_A : Any = hidden_size
_A : Dict = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info("Initializing the config with a `BiT` backbone.")
_A : int = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
}
_A : Dict = BitConfig(**__lowerCamelCase)
elif isinstance(__lowerCamelCase , __lowerCamelCase):
logger.info("Initializing the config with a `BiT` backbone.")
_A : Optional[Any] = BitConfig(**__lowerCamelCase)
elif isinstance(__lowerCamelCase , __lowerCamelCase):
_A : List[str] = backbone_config
else:
raise ValueError(
F"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.")
_A : Any = backbone_featmap_shape
_A : Optional[Any] = neck_ignore_stages
if readout_type != "project":
raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
else:
_A : List[Any] = None
_A : List[str] = None
_A : int = []
_A : Any = num_hidden_layers
_A : List[str] = num_attention_heads
_A : Any = intermediate_size
_A : int = hidden_act
_A : int = hidden_dropout_prob
_A : Tuple = attention_probs_dropout_prob
_A : str = initializer_range
_A : Optional[Any] = layer_norm_eps
_A : Optional[Any] = image_size
_A : Any = patch_size
_A : List[Any] = num_channels
_A : Union[str, Any] = qkv_bias
_A : List[Any] = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
_A : int = readout_type
_A : Union[str, Any] = reassemble_factors
_A : str = neck_hidden_sizes
_A : Optional[Any] = fusion_hidden_size
_A : Union[str, Any] = head_in_index
_A : List[str] = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
_A : str = use_auxiliary_head
_A : Union[str, Any] = auxiliary_loss_weight
_A : Any = semantic_loss_ignore_index
_A : Any = semantic_classifier_dropout
def _lowerCamelCase ( self) -> Dict:
_A : str = copy.deepcopy(self.__dict__)
if output["backbone_config"] is not None:
_A : Optional[Any] = self.backbone_config.to_dict()
_A : Optional[int] = self.__class__.model_type
return output
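# Added illustration: exercising the hybrid path of the config class above through
# its upstream equivalent, transformers.DPTConfig (assumed installed with DPT
# support). With is_hybrid=True and no explicit backbone_config, a default BiT
# backbone config is created, and to_dict() serializes it to a plain dictionary,
# exactly as the method above does.
from transformers import DPTConfig

_cfg = DPTConfig(is_hybrid=True, readout_type="project")
assert _cfg.backbone_config is not None                      # default BiT backbone filled in
assert isinstance(_cfg.to_dict()["backbone_config"], dict)   # nested config serialized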
| 11
|
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def __lowerCamelCase ( class_prompt , class_data_dir , num_class_images ):
'''simple docstring'''
snake_case_ = 1.5
snake_case_ = int(factor * num_class_images )
snake_case_ = ClipClient(
        url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=num_images , aesthetic_weight=0.1 )
    os.makedirs(F'''{class_data_dir}/images''' , exist_ok=True )
if len(list(Path(F'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images:
return
while True:
        snake_case_ = client.query(text=class_prompt )
        if len(class_images ) >= factor * num_class_images or num_images > 1E4:
break
else:
snake_case_ = int(factor * num_images )
snake_case_ = ClipClient(
                url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=num_images , aesthetic_weight=0.1 , )
snake_case_ = 0
snake_case_ = 0
    snake_case_ = tqdm(desc='downloading real regularization images' , total=num_class_images )
with open(F'''{class_data_dir}/caption.txt''' , 'w' ) as fa, open(F'''{class_data_dir}/urls.txt''' , 'w' ) as fa, open(
F'''{class_data_dir}/images.txt''' , 'w' ) as fa:
while total < num_class_images:
snake_case_ = class_images[count]
count += 1
try:
snake_case_ = requests.get(images['url'] )
if img.status_code == 200:
snake_case_ = Image.open(BytesIO(img.content ) )
with open(F'''{class_data_dir}/images/{total}.jpg''' , 'wb' ) as f:
f.write(img.content )
fa.write(images['caption'] + '\n' )
fa.write(images['url'] + '\n' )
fa.write(F'''{class_data_dir}/images/{total}.jpg''' + '\n' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def __lowerCamelCase ( ):
'''simple docstring'''
    snake_case_ = argparse.ArgumentParser('' , add_help=False )
    parser.add_argument('--class_prompt' , help='text prompt to retrieve images' , required=True , type=str )
    parser.add_argument('--class_data_dir' , help='path to save images' , required=True , type=str )
    parser.add_argument('--num_class_images' , help='number of images to download' , default=200 , type=int )
return parser.parse_args()
if __name__ == "__main__":
_UpperCAmelCase : Optional[int] = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
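    # Added illustration: the query-growth policy used in the retry loop of the
    # retrieval function above, isolated as a pure function. Each pass asks the index
    # for `factor` times more results until enough are returned or the 1e4 safety cap
    # is exceeded. The function name and the assert values are made up for this sketch.
    from typing import Optional

    def next_request_size(num_images: int, num_results: int, target: int,
                          factor: float = 1.5, cap: float = 1e4) -> Optional[int]:
        if num_results >= factor * target or num_images > cap:
            return None  # done: enough results (or the cap was hit)
        return int(factor * num_images)

    assert next_request_size(300, 100, 200) == 450   # too few results: grow the request
    assert next_request_size(300, 350, 200) is None  # 350 >= 1.5 * 200: stop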
| 285
| 0
|
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""])
parser.add_argument("""--model_name""", default="""roberta-large""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
_SCREAMING_SNAKE_CASE = parser.parse_args()
if args.model_type == "roberta":
_SCREAMING_SNAKE_CASE = RobertaForMaskedLM.from_pretrained(args.model_name)
_SCREAMING_SNAKE_CASE = """roberta"""
elif args.model_type == "gpt2":
        _SCREAMING_SNAKE_CASE = GPT2LMHeadModel.from_pretrained(args.model_name)
_SCREAMING_SNAKE_CASE = """transformer"""
_SCREAMING_SNAKE_CASE = model.state_dict()
_SCREAMING_SNAKE_CASE = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
_SCREAMING_SNAKE_CASE = state_dict[F'''{prefix}.{param_name}''']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
_SCREAMING_SNAKE_CASE = F'''{prefix}.embeddings.{w}.weight'''
_SCREAMING_SNAKE_CASE = state_dict[param_name]
for w in ["weight", "bias"]:
_SCREAMING_SNAKE_CASE = F'''{prefix}.embeddings.LayerNorm.{w}'''
_SCREAMING_SNAKE_CASE = state_dict[param_name]
# Transformer Blocks #
_SCREAMING_SNAKE_CASE = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
_SCREAMING_SNAKE_CASE = state_dict[
F'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
]
_SCREAMING_SNAKE_CASE = state_dict[F'''{prefix}.h.{teacher_idx}.attn.bias''']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
_SCREAMING_SNAKE_CASE = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
]
std_idx += 1
    # Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
_SCREAMING_SNAKE_CASE = state_dict[F'''{layer}''']
if args.vocab_transform:
for w in ["weight", "bias"]:
_SCREAMING_SNAKE_CASE = state_dict[F'''lm_head.dense.{w}''']
_SCREAMING_SNAKE_CASE = state_dict[F'''lm_head.layer_norm.{w}''']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
_SCREAMING_SNAKE_CASE = state_dict[F'''{prefix}.ln_f.{w}''']
_SCREAMING_SNAKE_CASE = state_dict["""lm_head.weight"""]
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
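    # Added illustration: the teacher-to-student layer mapping implied by the loop
    # over teacher_idx above. Teacher layers [0, 2, 4, 7, 9, 11] are copied into
    # student layers 0..5, compressing a 12-layer teacher into a 6-layer student.
    _teacher_layers = [0, 2, 4, 7, 9, 11]
    _layer_map = {t: s for s, t in enumerate(_teacher_layers)}
    assert _layer_map == {0: 0, 2: 1, 4: 2, 7: 3, 9: 4, 11: 5}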
| 165
|
def lowercase( UpperCamelCase_ ) -> list[list]:
'''simple docstring'''
UpperCamelCase = current_set.copy()
for row_index, row in enumerate(UpperCamelCase_ ):
UpperCamelCase = row[0]
        for column_index, column in enumerate(row ):
if magnitude == 0:
UpperCamelCase = column
continue
UpperCamelCase = column / magnitude
# Subtract to cancel term
UpperCamelCase = current_set[0]
UpperCamelCase = [first_row]
UpperCamelCase = current_set[1::]
for row in current_set:
UpperCamelCase = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(UpperCamelCase_ )
continue
for column_index in range(len(UpperCamelCase_ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(UpperCamelCase_ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
UpperCamelCase = final_set[0]
UpperCamelCase = []
UpperCamelCase = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
UpperCamelCase = simplify(UpperCamelCase_ )
for i in range(len(UpperCamelCase_ ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , UpperCamelCase_ )
UpperCamelCase = resultant
return final_set
def lowercase( UpperCamelCase_ ) -> list:
'''simple docstring'''
if len(UpperCamelCase_ ) == 0:
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
UpperCamelCase = len(UpperCamelCase_ ) + 1
    if any(len(item ) != _length for item in equations ):
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
for row in equations:
        if any(not isinstance(column , (int, float) ) for column in row ):
raise ValueError("""solve_simultaneous() requires lists of integers""" )
if len(UpperCamelCase_ ) == 1:
return [equations[0][-1] / equations[0][0]]
UpperCamelCase = equations.copy()
if any(0 in row for row in data_set ):
UpperCamelCase = data_set.copy()
UpperCamelCase = []
for row_index, row in enumerate(UpperCamelCase_ ):
if 0 not in row:
UpperCamelCase = data_set.pop(UpperCamelCase_ )
break
if not full_row:
raise ValueError("""solve_simultaneous() requires at least 1 full equation""" )
data_set.insert(0 , UpperCamelCase_ )
UpperCamelCase = data_set.copy()
UpperCamelCase = simplify(UpperCamelCase_ )
UpperCamelCase = simplified[::-1]
UpperCamelCase = []
for row in simplified:
UpperCamelCase = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
UpperCamelCase = row.copy()[: len(UpperCamelCase_ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(UpperCamelCase_ ) == 0:
solutions.append(0 )
continue
UpperCamelCase = temp_row[1::]
UpperCamelCase = temp_row[::-1]
        for column_index, column in enumerate(temp_row ):
current_solution -= column * solutions[column_index]
solutions.append(UpperCamelCase_ )
UpperCamelCase = []
for item in solutions:
        final.append(float(round(item , 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
_SCREAMING_SNAKE_CASE = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
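    # Added cross-check (not part of the solver above): the same 5x5 system solved
    # with numpy, which is assumed available here but not otherwise used by this
    # module. The exact solution is [-1, 0, 1, 2, 3].
    import numpy as np

    _a = np.array([row[:-1] for row in eq], dtype=float)
    _b = np.array([row[-1] for row in eq], dtype=float)
    print(np.linalg.solve(_a, _b))  # should match solve_simultaneous(eq) above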
| 165
| 1
|
'''simple docstring'''
import numpy as np
def __snake_case ( input_matrix : np.ndarray , vector : np.ndarray , error_tol : float = 1e-12 , max_iterations : int = 100 , ):
    assert np.shape(input_matrix )[0] == np.shape(input_matrix )[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix )[0] == np.shape(vector )[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix ) == np.iscomplexobj(vector )
    lowerCamelCase_ = np.iscomplexobj(input_matrix )
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
lowerCamelCase_ = False
lowerCamelCase_ = 0
lowerCamelCase_ = 0
    lowerCamelCase_ = 1e12
while not convergence:
# Multiple matrix by the vector.
        lowerCamelCase_ = np.dot(input_matrix , vector )
# Normalize the resulting output vector.
lowerCamelCase_ = w / np.linalg.norm(UpperCAmelCase_ )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
lowerCamelCase_ = vector.conj().T if is_complex else vector.T
        lowerCamelCase_ = np.dot(vector_h , np.dot(input_matrix , vector ) )
# Check convergence.
lowerCamelCase_ = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
lowerCamelCase_ = True
lowerCamelCase_ = lambda_
if is_complex:
lowerCamelCase_ = np.real(lambda_ )
return lambda_, vector
def __snake_case ( ):
lowerCamelCase_ = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
lowerCamelCase_ = np.array([41, 4, 20] )
    lowerCamelCase_ = real_input_matrix.astype(np.complex128 )
lowerCamelCase_ = np.triu(1J * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
    lowerCamelCase_ = np.array([41, 4, 20] ).astype(np.complex128 )
for problem_type in ["real", "complex"]:
if problem_type == "real":
lowerCamelCase_ = real_input_matrix
lowerCamelCase_ = real_vector
elif problem_type == "complex":
lowerCamelCase_ = complex_input_matrix
lowerCamelCase_ = complex_vector
# Our implementation.
        lowerCamelCase_ ,lowerCamelCase_ = power_iteration(input_matrix , vector )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
lowerCamelCase_ ,lowerCamelCase_ = np.linalg.eigh(UpperCAmelCase_ )
# Last eigenvalue is the maximum one.
lowerCamelCase_ = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
lowerCamelCase_ = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector ) - np.abs(eigen_vector_max ) ) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
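# Added illustration: power_iteration (the first function above, referred to by its
# upstream name) on a small symmetric matrix whose dominant eigenvalue is known in
# closed form. Wrapped in a function so it is inert unless called.
def _demo_power_iteration():
    m = np.array([[2.0, 1.0], [1.0, 2.0]])  # eigenvalues are 1 and 3
    value, _ = power_iteration(m, np.array([1.0, 0.0]))
    assert abs(value - 3.0) <= 1e-6          # converges to the dominant eigenvalue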
| 55
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowerCAmelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
lowerCAmelCase = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
lowerCAmelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE , '''rb''' ) as f:
        lowercase__ = Image.open(f )
return im.convert('''RGB''' )
@dataclass
class _a :
_lowercase : Optional[str] = field(
default=UpperCamelCase__ , metadata={
'''help''': '''Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'''
} , )
_lowercase : Optional[str] = field(
default=UpperCamelCase__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
_lowercase : Optional[str] = field(default=UpperCamelCase__ , metadata={'''help''': '''A folder containing the training data.'''} )
_lowercase : Optional[str] = field(default=UpperCamelCase__ , metadata={'''help''': '''A folder containing the validation data.'''} )
_lowercase : Optional[float] = field(
default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
_lowercase : Optional[int] = field(
default=UpperCamelCase__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
_lowercase : Optional[int] = field(
default=UpperCamelCase__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def lowerCamelCase_ ( self: Any ) -> int:
"""simple docstring"""
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'''You must specify either a dataset name from the hub or a train and/or validation directory.''' )
@dataclass
class _a :
_lowercase : str = field(
default='''google/vit-base-patch16-224-in21k''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , )
_lowercase : Optional[str] = field(
default=UpperCamelCase__ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(UpperCamelCase__ )} , )
_lowercase : Optional[str] = field(
default=UpperCamelCase__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
_lowercase : Optional[str] = field(
default=UpperCamelCase__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} )
_lowercase : str = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
_lowercase : str = field(default=UpperCamelCase__ , metadata={'''help''': '''Name or path of preprocessor config.'''} )
_lowercase : bool = field(
default=UpperCamelCase__ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
_lowercase : bool = field(
default=UpperCamelCase__ , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = torch.stack([example['''pixel_values'''] for example in examples] )
lowercase__ = torch.tensor([example['''labels'''] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def _a ( ):
"""simple docstring"""
lowercase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase__ , lowercase__ , lowercase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase__ , lowercase__ , lowercase__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('''run_image_classification''' , model_args , data_args )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase__ = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
lowercase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
lowercase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='''image-classification''' , use_auth_token=True if model_args.use_auth_token else None , )
else:
lowercase__ = {}
if data_args.train_dir is not None:
lowercase__ = os.path.join(data_args.train_dir , '''**''' )
if data_args.validation_dir is not None:
lowercase__ = os.path.join(data_args.validation_dir , '''**''' )
lowercase__ = load_dataset(
            '''imagefolder''' , data_files=data_files , cache_dir=model_args.cache_dir , task='''image-classification''' , )
# If we don't have a validation split, split off a percentage of train as validation.
lowercase__ = None if '''validation''' in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
lowercase__ = dataset['''train'''].train_test_split(data_args.train_val_split )
lowercase__ = split['''train''']
lowercase__ = split['''test''']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
lowercase__ = dataset['''train'''].features['''labels'''].names
lowercase__ , lowercase__ = {}, {}
for i, label in enumerate(SCREAMING_SNAKE_CASE ):
        lowercase__ = str(i )
lowercase__ = label
# Load the accuracy metric from the datasets package
lowercase__ = evaluate.load('''accuracy''' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p ):
return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
lowercase__ = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(labels ) , label2id=label2id , id2label=id2label , finetuning_task='''image-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowercase__ = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
lowercase__ = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
lowercase__ = image_processor.size['''shortest_edge''']
else:
lowercase__ = (image_processor.size['''height'''], image_processor.size['''width'''])
lowercase__ = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
lowercase__ = Compose(
[
            RandomResizedCrop(size ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
lowercase__ = Compose(
[
            Resize(size ),
            CenterCrop(size ),
ToTensor(),
normalize,
] )
    def train_transforms(example_batch ):
        example_batch['''pixel_values'''] = [
            _train_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']
        ]
        return example_batch
    def val_transforms(example_batch ):
        example_batch['''pixel_values'''] = [_val_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']]
        return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
lowercase__ = (
dataset['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(SCREAMING_SNAKE_CASE )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
lowercase__ = (
dataset['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(SCREAMING_SNAKE_CASE )
    # Initialize our trainer
lowercase__ = Trainer(
        model=model , args=training_args , train_dataset=dataset['''train'''] if training_args.do_train else None , eval_dataset=dataset['''validation'''] if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=image_processor , data_collator=collate_fn , )
# Training
if training_args.do_train:
lowercase__ = None
if training_args.resume_from_checkpoint is not None:
lowercase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase__ = last_checkpoint
        lowercase__ = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowercase__ = trainer.evaluate()
        trainer.log_metrics('''eval''' , metrics )
        trainer.save_metrics('''eval''' , metrics )
# Write model card and (optionally) push to hub
lowercase__ = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''image-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''image-classification''', '''vision'''],
}
if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
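# Added illustration: the label2id / id2label construction from main() above,
# isolated on a toy label list so the string-keyed mapping convention (used by the
# Inference API) is easy to inspect. The helper name is made up for this sketch.
def _build_label_maps(labels):
    label2id = {label: str(i) for i, label in enumerate(labels)}
    id2label = {str(i): label for i, label in enumerate(labels)}
    return label2id, id2label

assert _build_label_maps(["cat", "dog"]) == ({"cat": "0", "dog": "1"}, {"0": "cat", "1": "dog"})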
| 110
| 0
|
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Dict = FunnelTokenizer
_UpperCAmelCase :Union[str, Any] = FunnelTokenizerFast
_UpperCAmelCase :Union[str, Any] = True
_UpperCAmelCase :Tuple = True
def _snake_case ( self ):
super().setUp()
lowercase__: Optional[Any] = [
'''<unk>''',
'''<cls>''',
'''<sep>''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowercase__: int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def _snake_case ( self , **_UpperCAmelCase ):
return FunnelTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def _snake_case ( self , **_UpperCAmelCase ):
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: Tuple = '''UNwant\u00E9d,running'''
lowercase__: Optional[int] = '''unwanted, running'''
return input_text, output_text
def _snake_case ( self ):
lowercase__: List[str] = self.tokenizer_class(self.vocab_file )
lowercase__: Tuple = tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(tokens , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 10, 8, 9] )
def _snake_case ( self ):
lowercase__: Union[str, Any] = self.get_tokenizers(do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
lowercase__: int = tokenizer('''UNwant\u00E9d,running''' )
lowercase__: Union[str, Any] = len(inputs['''input_ids'''] ) - 1
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len )
lowercase__: Union[str, Any] = tokenizer('''UNwant\u00E9d,running''' , '''UNwant\u00E9d,running''' )
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len + [1] * sentence_len )
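# Added note: Funnel marks the <cls> position with token type 2 rather than 0, which
# is the layout the two assertions above check. Schematically, for sentences that
# span n token positions (separators counted into n for brevity):
_n = 3
_single = [2] + [0] * _n           # <cls> + sentence A
_pair = [2] + [0] * _n + [1] * _n  # <cls> + sentence A + sentence B
assert _pair[: len(_single)] == _single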
| 2
|
"""simple docstring"""
import unittest
from transformers import DonutProcessor
__A = "naver-clova-ix/donut-base"
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ):
lowercase__: int = DonutProcessor.from_pretrained(_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Tuple = {
'''name''': '''John Doe''',
'''age''': '''99''',
'''city''': '''Atlanta''',
'''state''': '''GA''',
'''zip''': '''30301''',
'''phone''': '''123-4567''',
'''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
}
lowercase__: Union[str, Any] = (
'''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
'''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
'''<s_nicknames><s_nickname>Johnny</s_nickname>'''
'''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
)
        lowercase__: str = self.processor.token2json(sequence )
        self.assertDictEqual(actual_json , expected_json )
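        # Added illustration (comment-only, since it needs the downloaded processor):
        # a smaller token2json round trip following the same tag grammar as above.
        #   self.processor.token2json("<s_name>Ada</s_name>")
        #   # -> {"name": "Ada"}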
| 2
| 1
|
def lowerCamelCase__ ( input_string : str , pattern : str ) -> bool:
    UpperCamelCase_ = len(input_string ) + 1
    UpperCamelCase_ = len(pattern ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
    UpperCamelCase_ = [[0 for i in range(len_pattern )] for j in range(len_string )]
# since string of zero length match pattern of zero length
UpperCamelCase_ = 1
# since pattern of zero length will never match with string of non-zero length
    for i in range(1 , len_string ):
UpperCamelCase_ = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
    for j in range(1 , len_pattern ):
UpperCamelCase_ = dp[0][j - 2] if pattern[j - 1] == """*""" else 0
# now using bottom-up approach to find for all remaining lengths
    for i in range(1 , len_string ):
        for j in range(1 , len_pattern ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
UpperCamelCase_ = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
UpperCamelCase_ = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
UpperCamelCase_ = dp[i - 1][j]
else:
UpperCamelCase_ = 0
else:
UpperCamelCase_ = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
_A = '''aab'''
_A = '''c*a*b'''
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F'''{input_string} matches the given pattern {pattern}''')
else:
print(F'''{input_string} does not match with the given pattern {pattern}''')
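    # Added worked example: why "aab" matches "c*a*b". "c*" matches zero 'c's via
    # dp[0][2] = dp[0][0]; "a*" absorbs both 'a's via dp[i][4] = dp[i - 1][4]
    # because pattern[2] == 'a'; the trailing 'b' is a direct character match,
    # giving dp[3][5] = 1.
    assert match_pattern("aab", "c*a*b")
    assert not match_pattern("aab", "c*b")  # no '*' can absorb the 'a's here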
| 122
|
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
_A = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
_A = 10
_A = 256
def lowerCamelCase__ ( a__ : List[str] ) -> Optional[MinHash]:
if len(a__ ) < MIN_NUM_TOKENS:
return None
    UpperCamelCase_ = MinHash(num_perm=NUM_PERM )
for token in set(a__ ):
min_hash.update(token.encode() )
return min_hash
def lowerCamelCase__ ( a__ : str ) -> Set[str]:
return {t for t in NON_ALPHA.split(a__ ) if len(t.strip() ) > 0}
class lowercase_ :
    def __init__( self , *, duplication_jaccard_threshold = 0.85 , ):
"""simple docstring"""
UpperCamelCase_ = duplication_jaccard_threshold
UpperCamelCase_ = NUM_PERM
UpperCamelCase_ = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
UpperCamelCase_ = defaultdict(__UpperCamelCase )
    def lowerCamelCase_ ( self , code_key , min_hash ):
        """simple docstring"""
        UpperCamelCase_ = self._index.query(min_hash )
        if code_key in self._index.keys:
            print(f'''Duplicate key {code_key}''' )
            return
        self._index.insert(code_key , min_hash )
        if len(close_duplicates ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key )
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = []
for base, duplicates in self._duplicate_clusters.items():
UpperCamelCase_ = [base] + list(__UpperCamelCase )
# reformat the cluster to be a list of dict
UpperCamelCase_ = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
duplicate_clusters.append(__UpperCamelCase )
return duplicate_clusters
def lowerCamelCase_ ( self , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = self.get_duplicate_clusters()
with open(__UpperCamelCase , """w""" ) as f:
            json.dump(duplicate_clusters , f )
def lowerCamelCase__ ( element : Optional[int] ) -> List[str]:
UpperCamelCase_ , UpperCamelCase_ = element
UpperCamelCase_ = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def lowerCamelCase__ ( a__ : Type[Dataset] ) -> Optional[Any]:
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(a__ , max_queue_size=1_0000 ) , chunksize=100 , ):
if data is not None:
yield data
def lowerCamelCase__ ( dataset : Type[Dataset] , jaccard_threshold : float ) -> List[Any]:
    UpperCamelCase_ = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset ) ) , max_queue_size=100 ) ):
        di.add(filename , min_hash )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def lowerCamelCase__ ( code1 : str , code2 : str ) -> float:
    UpperCamelCase_ = get_tokens(code1 )
    UpperCamelCase_ = get_tokens(code2 )
    return len(tokens1 & tokens2 ) / len(tokens1 | tokens2 )
_A = None
def lowerCamelCase__ ( cluster : str , jaccard_threshold : str ) -> Optional[Any]:
    UpperCamelCase_ = []
    for element1 in cluster:
        UpperCamelCase_ = _shared_dataset[element1["""base_index"""]]["""content"""]
        for element2 in extremes:
            UpperCamelCase_ = _shared_dataset[element2["""base_index"""]]["""content"""]
            if jaccard_similarity(code1 , code2 ) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            UpperCamelCase_ = 1
            extremes.append(element1 )
    return extremes
def lowerCamelCase__ ( cluster_list : str , dataset : Optional[int] , jaccard_threshold : Optional[int] ) -> str:
    global _shared_dataset
    UpperCamelCase_ = dataset
    UpperCamelCase_ = []
    UpperCamelCase_ = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f , cluster_list , ) , total=len(cluster_list ) , ):
            extremes_list.append(extremes )
return extremes_list
def lowerCamelCase__ ( dataset : Type[Dataset] , jaccard_threshold : float = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]:
    UpperCamelCase_ = make_duplicate_clusters(dataset , jaccard_threshold )
UpperCamelCase_ = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
UpperCamelCase_ = {}
    UpperCamelCase_ = find_extremes(duplicate_clusters , dataset , jaccard_threshold )
for extremes in extremes_clusters:
for element in extremes:
UpperCamelCase_ = element
UpperCamelCase_ = duplicate_indices - set(extreme_dict.keys() )
    UpperCamelCase_ = dataset.filter(lambda x , idx : idx not in remove_indices , with_indices=True )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
UpperCamelCase_ = element["""base_index"""] in extreme_dict
if element["is_extreme"]:
UpperCamelCase_ = extreme_dict[element["""base_index"""]]["""copies"""]
    print(f'''Original dataset size: {len(dataset )}''' )
    print(f'''Number of duplicate clusters: {len(duplicate_clusters )}''' )
    print(f'''Files in duplicate cluster: {len(duplicate_indices )}''' )
    print(f'''Unique files in duplicate cluster: {len(extreme_dict )}''' )
    print(f'''Filtered dataset size: {len(ds_filter )}''' )
return ds_filter, duplicate_clusters
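# Added illustration: the MinHash-estimated Jaccard similarity that the index above
# is built on, computed for two near-duplicate snippets. Self-contained sketch: it
# re-creates the NON_ALPHA tokenizer inline rather than relying on the names above.
def _demo_minhash_jaccard():
    tok = re.compile(r"[^A-Za-z_0-9]")
    s1 = {t for t in tok.split("def add(a, b): return a + b") if t.strip()}
    s2 = {t for t in tok.split("def add(x, y): return x + y") if t.strip()}
    m1, m2 = MinHash(num_perm=256), MinHash(num_perm=256)
    for t in s1:
        m1.update(t.encode())
    for t in s2:
        m2.update(t.encode())
    # The estimate approaches the exact Jaccard value as num_perm grows.
    return len(s1 & s2) / len(s1 | s2), m1.jaccard(m2)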
| 122
| 1
|
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
__a: List[Any] = 8
def __UpperCamelCase ( x , bits=BITS ):
lowercase__ : str = x.device
lowercase__ : Any = (x * 255).int().clamp(0 , 255 )
    lowercase__ : int = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device )
    lowercase__ : Optional[Any] = rearrange(mask , '''d -> d 1 1''' )
    lowercase__ : Any = rearrange(x , '''b c h w -> b c 1 h w''' )
    lowercase__ : Any = ((x & mask) != 0).float()
    lowercase__ : Tuple = rearrange(bits , '''b c d h w -> b (c d) h w''' )
lowercase__ : str = bits * 2 - 1
return bits
def __UpperCamelCase ( x , bits=BITS ):
lowercase__ : str = x.device
lowercase__ : int = (x > 0).int()
    lowercase__ : Optional[int] = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device , dtype=torch.int32 )
    lowercase__ : Optional[Any] = rearrange(mask , '''d -> d 1 1''' )
    lowercase__ : Optional[int] = rearrange(x , '''b (c d) h w -> b c d h w''' , d=8 )
lowercase__ : Dict = reduce(x * mask , '''b c d h w -> b c h w''' , '''sum''' )
return (dec / 255).clamp(0.0 , 1.0 )
def __UpperCamelCase ( self , model_output , timestep , sample , eta = 0.0 , use_clipped_model_output = True , generator=None , return_dict = True , ):
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read the DDIM paper for an in-depth understanding
# Notation (<variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
lowercase__ : Union[str, Any] = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
lowercase__ : Any = self.alphas_cumprod[timestep]
lowercase__ : int = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
lowercase__ : Tuple = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowercase__ : str = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
lowercase__ : Optional[Any] = self.bit_scale
if self.config.clip_sample:
        lowercase__ : Optional[int] = torch.clamp(pred_original_sample , -scale , scale )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    lowercase__ : Optional[Any] = self._get_variance(timestep , prev_timestep )
lowercase__ : Tuple = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
lowercase__ : str = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowercase__ : Optional[Any] = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowercase__ : Optional[int] = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
lowercase__ : Optional[Any] = model_output.device if torch.is_tensor(UpperCAmelCase ) else '''cpu'''
lowercase__ : Any = torch.randn(model_output.shape , dtype=model_output.dtype , generator=UpperCAmelCase ).to(UpperCAmelCase )
lowercase__ : Any = self._get_variance(UpperCAmelCase , UpperCAmelCase ) ** 0.5 * eta * noise
lowercase__ : int = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=UpperCAmelCase , pred_original_sample=UpperCAmelCase )
def __UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="epsilon" , UpperCAmelCase=None , UpperCAmelCase = True , ):
lowercase__ : Any = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
lowercase__ , lowercase__ : List[Any] = torch.split(UpperCAmelCase , sample.shape[1] , dim=1 )
else:
lowercase__ : Any = None
# 1. compute alphas, betas
lowercase__ : Optional[Any] = self.alphas_cumprod[t]
lowercase__ : Optional[int] = self.alphas_cumprod[t - 1] if t > 0 else self.one
lowercase__ : int = 1 - alpha_prod_t
lowercase__ : Dict = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
lowercase__ : int = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
lowercase__ : List[str] = model_output
else:
raise ValueError(F"""Unsupported prediction_type {prediction_type}.""" )
# 3. Clip "predicted x_0"
lowercase__ : int = self.bit_scale
if self.config.clip_sample:
lowercase__ : Any = torch.clamp(UpperCAmelCase , -scale , UpperCAmelCase )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ : List[Any] = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
lowercase__ : List[str] = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ : int = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
lowercase__ : List[str] = 0
if t > 0:
lowercase__ : Optional[Any] = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=UpperCAmelCase ).to(model_output.device )
lowercase__ : Any = (self._get_variance(UpperCAmelCase , predicted_variance=UpperCAmelCase ) ** 0.5) * noise
lowercase__ : Optional[int] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=UpperCAmelCase , pred_original_sample=UpperCAmelCase )
class UpperCAmelCase ( a__ ):
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 1.0 , ) -> str:
super().__init__()
lowercase__ : List[str] = bit_scale
lowercase__ : str = (
ddim_bit_scheduler_step if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase )
@torch.no_grad()
def __call__( self , __lowerCAmelCase = 256 , __lowerCAmelCase = 256 , __lowerCAmelCase = 50 , __lowerCAmelCase = None , __lowerCAmelCase = 1 , __lowerCAmelCase = "pil" , __lowerCAmelCase = True , **__lowerCAmelCase , ) -> Union[Tuple, ImagePipelineOutput]:
lowercase__ : str = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) , generator=__lowerCAmelCase , )
lowercase__ : List[Any] = decimal_to_bits(__lowerCAmelCase ) * self.bit_scale
lowercase__ : List[str] = latents.to(self.device )
self.scheduler.set_timesteps(__lowerCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
lowercase__ : List[str] = self.unet(__lowerCAmelCase , __lowerCAmelCase ).sample
# compute the previous noisy sample x_t -> x_t-1
lowercase__ : List[Any] = self.scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample
lowercase__ : int = bits_to_decimal(__lowerCAmelCase )
if output_type == "pil":
lowercase__ : int = self.numpy_to_pil(__lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCAmelCase )
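# A minimal usage sketch (an assumption: the original community example exports
# this pipeline as BitDiffusion and pairs it with a UNet trained in bit space,
# i.e. one with 3 * 8 input/output channels):
#
#   unet = UNet2DModel(sample_size=64, in_channels=3 * 8, out_channels=3 * 8)
#   scheduler = DDIMScheduler()
#   pipe = BitDiffusion(unet, scheduler)
#   image = pipe(height=64, width=64, num_inference_steps=50).images[0]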
| 214
|
'''simple docstring'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class UpperCAmelCase ( tf.keras.optimizers.schedules.LearningRateSchedule ):
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 1.0 , __lowerCAmelCase = None , ) -> Union[str, Any]:
super().__init__()
lowercase__ : Any = initial_learning_rate
lowercase__ : str = warmup_steps
lowercase__ : Dict = power
lowercase__ : int = decay_schedule_fn
lowercase__ : Dict = name
def __call__( self , __lowerCAmelCase ) -> int:
with tf.name_scope(self.name or '''WarmUp''' ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
lowercase__ : Union[str, Any] = tf.cast(__lowerCAmelCase , tf.floataa )
lowercase__ : Optional[int] = tf.cast(self.warmup_steps , tf.floataa )
lowercase__ : Dict = global_step_float / warmup_steps_float
lowercase__ : Dict = self.initial_learning_rate * tf.math.pow(__lowerCAmelCase , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=__lowerCAmelCase , )
def _lowerCAmelCase( self ) -> List[str]:
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
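# How the schedule composes, as a sketch (WarmUp is the call-site name used for
# the class above; the constants here are illustrative, not defaults):
#
#   decay = tf.keras.optimizers.schedules.PolynomialDecay(5e-5, 900, end_learning_rate=0.0)
#   schedule = WarmUp(5e-5, decay, 100)
#   schedule(50)   # during warmup: 5e-5 * (50 / 100) ** power
#   schedule(500)  # past warmup:   decay(500 - 100)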
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 0.0 , UpperCAmelCase = 0.9 , UpperCAmelCase = 0.9_9_9 , UpperCAmelCase = 1E-8 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = 0.0 , UpperCAmelCase = 1.0 , UpperCAmelCase = None , ):
lowercase__ : List[Any] = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=UpperCAmelCase , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=UpperCAmelCase , )
if num_warmup_steps:
lowercase__ : Optional[Any] = WarmUp(
initial_learning_rate=UpperCAmelCase , decay_schedule_fn=UpperCAmelCase , warmup_steps=UpperCAmelCase , )
if weight_decay_rate > 0.0:
        lowercase__ : str = AdamWeightDecay(
            learning_rate=UpperCAmelCase , weight_decay_rate=UpperCAmelCase , beta_1=UpperCAmelCase , beta_2=UpperCAmelCase , epsilon=UpperCAmelCase , clipnorm=UpperCAmelCase , global_clipnorm=UpperCAmelCase , exclude_from_weight_decay=['''LayerNorm''', '''layer_norm''', '''bias'''] , include_in_weight_decay=UpperCAmelCase , )
else:
        lowercase__ : Any = tf.keras.optimizers.Adam(
            learning_rate=UpperCAmelCase , beta_1=UpperCAmelCase , beta_2=UpperCAmelCase , epsilon=UpperCAmelCase , clipnorm=UpperCAmelCase , global_clipnorm=UpperCAmelCase , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
class UpperCAmelCase ( a__ ):
'''simple docstring'''
def __init__( self , __lowerCAmelCase = 0.0_0_1 , __lowerCAmelCase = 0.9 , __lowerCAmelCase = 0.9_9_9 , __lowerCAmelCase = 1E-7 , __lowerCAmelCase = False , __lowerCAmelCase = 0.0 , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = "AdamWeightDecay" , **__lowerCAmelCase , ) -> str:
super().__init__(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
lowercase__ : int = weight_decay_rate
lowercase__ : Dict = include_in_weight_decay
lowercase__ : List[str] = exclude_from_weight_decay
@classmethod
def _lowerCAmelCase( cls , __lowerCAmelCase ) -> str:
lowercase__ : int = {'''WarmUp''': WarmUp}
return super(__lowerCAmelCase , cls ).from_config(__lowerCAmelCase , custom_objects=__lowerCAmelCase )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[str]:
super(__lowerCAmelCase , self )._prepare_local(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
lowercase__ : Any = tf.constant(
self.weight_decay_rate , name='''adam_weight_decay_rate''' )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Tuple:
lowercase__ : str = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['''weight_decay_rate'''] , use_locking=self._use_locking , )
return tf.no_op()
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase=None , **__lowerCAmelCase ) -> List[Any]:
lowercase__ , lowercase__ : Any = list(zip(*__lowerCAmelCase ) )
return super(__lowerCAmelCase , self ).apply_gradients(zip(__lowerCAmelCase , __lowerCAmelCase ) , name=__lowerCAmelCase , **__lowerCAmelCase )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]:
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
lowercase__ : Union[str, Any] = apply_state or {}
lowercase__ : Optional[Any] = apply_state.get((var_device, var_dtype) )
if coefficients is None:
lowercase__ : Union[str, Any] = self._fallback_apply_state(__lowerCAmelCase , __lowerCAmelCase )
lowercase__ : Optional[int] = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None ) -> Any:
lowercase__ , lowercase__ : Dict = self._get_lr(var.device , var.dtype.base_dtype , __lowerCAmelCase )
lowercase__ : List[str] = self._decay_weights_op(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
with tf.control_dependencies([decay] ):
return super(__lowerCAmelCase , self )._resource_apply_dense(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None ) -> List[str]:
lowercase__ , lowercase__ : List[Any] = self._get_lr(var.device , var.dtype.base_dtype , __lowerCAmelCase )
lowercase__ : Union[str, Any] = self._decay_weights_op(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
with tf.control_dependencies([decay] ):
return super(__lowerCAmelCase , self )._resource_apply_sparse(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
def _lowerCAmelCase( self ) -> List[str]:
lowercase__ : Optional[Any] = super().get_config()
config.update({'''weight_decay_rate''': self.weight_decay_rate} )
return config
def _lowerCAmelCase( self , __lowerCAmelCase ) -> List[Any]:
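        # Precedence: a zero decay rate disables weight decay outright, and a
        # match in the include list wins before the exclude list is consulted.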
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(__lowerCAmelCase , __lowerCAmelCase ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(__lowerCAmelCase , __lowerCAmelCase ) is not None:
return False
return True
class UpperCAmelCase ( a__ ):
'''simple docstring'''
def __init__( self ) -> List[Any]:
lowercase__ : List[str] = []
lowercase__ : Optional[int] = None
@property
def _lowerCAmelCase( self ) -> Optional[Any]:
if self._accum_steps is None:
lowercase__ : int = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=__lowerCAmelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def _lowerCAmelCase( self ) -> Union[str, Any]:
if not self._gradients:
raise ValueError('''The accumulator should be called first to initialize the gradients''' )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self , __lowerCAmelCase ) -> Optional[Any]:
if not self._gradients:
lowercase__ : str = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(__lowerCAmelCase ) , trainable=__lowerCAmelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(__lowerCAmelCase ) != len(self._gradients ):
raise ValueError(F"""Expected {len(self._gradients )} gradients, but got {len(__lowerCAmelCase )}""" )
for accum_gradient, gradient in zip(self._gradients , __lowerCAmelCase ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(__lowerCAmelCase )
self._accum_steps.assign_add(1 )
def _lowerCAmelCase( self ) -> Optional[Any]:
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(__lowerCAmelCase ) )
| 214
| 1
|
'''simple docstring'''
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int = 1000 ) -> int:
    # Project Euler 25: index of the first Fibonacci number with n digits.
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = 1, 1
UpperCAmelCase_ : Optional[Any] = 2
while True:
UpperCAmelCase_ : Optional[Any] = 0
UpperCAmelCase_ : List[Any] = fa + fa
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = fa, f
index += 1
        for _ in str(f ):
i += 1
if i == n:
break
return index
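# An alternative closed-form sketch (an assumption, not part of the original
# solution): Binet's formula gives len(str(F(k))) ~= k*log10(phi) - log10(sqrt(5)) + 1,
# so for n >= 2 the first index with n digits is the ceiling below.
def _solution_closed_form(n: int = 1000) -> int:
    import math
    phi = (1 + math.sqrt(5)) / 2
    return math.ceil((n - 1 + math.log10(math.sqrt(5))) / math.log10(phi))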
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 125
|
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
A : int = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class __A( unittest.TestCase ):
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls ) -> Union[str, Any]:
'''simple docstring'''
__a = TOKEN
HfFolder.save_token(_snake_case )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls ) -> Union[str, Any]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
__a = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
__a = FlaxBertModel(_snake_case )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_snake_case , repo_id='''test-model-flax''' , push_to_hub=_snake_case , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
__a = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
__a = FlaxBertModel(_snake_case )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
_snake_case , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_snake_case , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" )
def __lowerCAmelCase ( a__ , a__ ) -> str:
__a = True
__a = flatten_dict(modela.params )
__a = flatten_dict(modela.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1e-4:
__a = False
return models_are_equal
@require_flax
class __A( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
__a = FlaxBertModel(_snake_case )
__a = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_snake_case , _snake_case ) )
with self.assertRaises(_snake_case ):
__a = FlaxBertModel.from_pretrained(_snake_case )
__a = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case )
self.assertTrue(check_models_equal(_snake_case , _snake_case ) )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
__a = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
__a = FlaxBertModel(_snake_case )
__a = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_snake_case , _snake_case ) , max_shard_size='''10KB''' )
with self.assertRaises(_snake_case ):
__a = FlaxBertModel.from_pretrained(_snake_case )
__a = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case )
self.assertTrue(check_models_equal(_snake_case , _snake_case ) )
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
__a = '''bert'''
__a = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(_snake_case ):
__a = FlaxBertModel.from_pretrained(_snake_case )
__a = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case )
self.assertIsNotNone(_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
__a = '''bert'''
__a = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(_snake_case ):
__a = FlaxBertModel.from_pretrained(_snake_case )
__a = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case )
self.assertIsNotNone(_snake_case )
| 6
| 0
|
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class SCREAMING_SNAKE_CASE__ ( snake_case__ ,unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = '''ssube/stable-diffusion-x4-upscaler-onnx'''
def lowerCamelCase_ ( self : Dict , UpperCAmelCase_ : Union[str, Any]=0 ):
"""simple docstring"""
__UpperCAmelCase : str = floats_tensor((1, 3, 128, 128) , rng=random.Random(UpperCAmelCase_ ) )
__UpperCAmelCase : Any = torch.manual_seed(UpperCAmelCase_ )
__UpperCAmelCase : Optional[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
__UpperCAmelCase : Any = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
__UpperCAmelCase : Any = self.get_dummy_inputs()
__UpperCAmelCase : Any = pipe(**UpperCAmelCase_ ).images
__UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
__UpperCAmelCase : List[Any] = np.array(
[0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
__UpperCAmelCase : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
__UpperCAmelCase : Optional[int] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
__UpperCAmelCase : Tuple = self.get_dummy_inputs()
__UpperCAmelCase : str = pipe(**UpperCAmelCase_ ).images
__UpperCAmelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__UpperCAmelCase : Optional[int] = np.array(
[0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
__UpperCAmelCase : int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
__UpperCAmelCase : List[str] = self.get_dummy_inputs()
__UpperCAmelCase : Dict = pipe(**UpperCAmelCase_ ).images
__UpperCAmelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__UpperCAmelCase : Union[str, Any] = np.array(
[0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
__UpperCAmelCase : List[str] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
__UpperCAmelCase : Any = self.get_dummy_inputs()
__UpperCAmelCase : List[str] = pipe(**UpperCAmelCase_ ).images
__UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__UpperCAmelCase : Dict = np.array(
[0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
__UpperCAmelCase : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
__UpperCAmelCase : Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
__UpperCAmelCase : List[Any] = self.get_dummy_inputs()
__UpperCAmelCase : Optional[int] = pipe(**UpperCAmelCase_ ).images
__UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__UpperCAmelCase : str = np.array(
[0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@property
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
__UpperCAmelCase : List[str] = ort.SessionOptions()
__UpperCAmelCase : List[Any] = False
return options
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
__UpperCAmelCase : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
__UpperCAmelCase : int = init_image.resize((128, 128) )
# using the PNDM scheduler by default
__UpperCAmelCase : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
__UpperCAmelCase : str = "A fantasy landscape, trending on artstation"
__UpperCAmelCase : Any = torch.manual_seed(0 )
__UpperCAmelCase : int = pipe(
prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCAmelCase_ , output_type="np" , )
__UpperCAmelCase : Dict = output.images
__UpperCAmelCase : Tuple = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
__UpperCAmelCase : Union[str, Any] = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
__UpperCAmelCase : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
__UpperCAmelCase : int = init_image.resize((128, 128) )
__UpperCAmelCase : Optional[int] = LMSDiscreteScheduler.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" , subfolder="scheduler" )
__UpperCAmelCase : Any = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" , scheduler=UpperCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
__UpperCAmelCase : int = "A fantasy landscape, trending on artstation"
__UpperCAmelCase : List[Any] = torch.manual_seed(0 )
__UpperCAmelCase : Optional[Any] = pipe(
prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , guidance_scale=7.5 , num_inference_steps=20 , generator=UpperCAmelCase_ , output_type="np" , )
__UpperCAmelCase : List[str] = output.images
__UpperCAmelCase : int = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
__UpperCAmelCase : str = np.array(
[0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 363
|
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def __UpperCamelCase ( _UpperCAmelCase=None ):
if subparsers is not None:
__UpperCAmelCase : Optional[int] = subparsers.add_parser("env" )
else:
__UpperCAmelCase : List[Any] = argparse.ArgumentParser("Accelerate env command" )
parser.add_argument(
"--config_file", default=_UpperCAmelCase, help="The config file to use for the default values in the launching script." )
if subparsers is not None:
parser.set_defaults(func=_UpperCAmelCase )
return parser
def __UpperCamelCase ( _UpperCAmelCase ):
__UpperCAmelCase : Dict = torch.__version__
__UpperCAmelCase : str = torch.cuda.is_available()
__UpperCAmelCase : str = is_xpu_available()
__UpperCAmelCase : List[Any] = is_npu_available()
__UpperCAmelCase : Union[str, Any] = "Not found"
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(_UpperCAmelCase ):
__UpperCAmelCase : Union[str, Any] = load_config_from_file(args.config_file ).to_dict()
__UpperCAmelCase : List[str] = {
"`Accelerate` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"Numpy version": np.__version__,
"PyTorch version (GPU?)": F"{pt_version} ({pt_cuda_available})",
"PyTorch XPU available": str(_UpperCAmelCase ),
"PyTorch NPU available": str(_UpperCAmelCase ),
"System RAM": F"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
}
if pt_cuda_available:
__UpperCAmelCase : int = torch.cuda.get_device_name()
print("\nCopy-and-paste the text below in your GitHub issue\n" )
print("\n".join([F"- {prop}: {val}" for prop, val in info.items()] ) )
print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:" )
__UpperCAmelCase : Tuple = (
"\n".join([F"\t- {prop}: {val}" for prop, val in accelerate_config.items()] )
if isinstance(_UpperCAmelCase, _UpperCAmelCase )
else F"\t{accelerate_config}"
)
print(_UpperCAmelCase )
__UpperCAmelCase : Any = accelerate_config
return info
def __UpperCamelCase ( ):
__UpperCAmelCase : Tuple = env_command_parser()
__UpperCAmelCase : Dict = parser.parse_args()
env_command(_UpperCAmelCase )
return 0
if __name__ == "__main__":
raise SystemExit(main())
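# Typical invocation (illustrative): `accelerate env` prints the info table
# built above, one "- <prop>: <val>" line per entry, followed by the default
# or passed-in accelerate config.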
| 37
| 0
|
"""simple docstring"""
import math
def A ( snake_case__ , snake_case__ ):
'''simple docstring'''
    # reject negative values of the initial intensity
    if initial_intensity < 0:
        raise ValueError("""The value of intensity cannot be negative""" )
    # reject angles outside the allowed 0-360 degree range
    if angle < 0 or angle > 3_60:
        raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""" )
return initial_intensity * (math.cos(math.radians(snake_case__ ) ) ** 2)
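# Quick illustrative checks (assumed values, not original doctests): an aligned
# polarizer passes the full intensity, and 60 degrees passes a quarter of it.
assert math.isclose(100.0 * math.cos(math.radians(0)) ** 2, 100.0)
assert math.isclose(100.0 * math.cos(math.radians(60)) ** 2, 25.0)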
if __name__ == "__main__":
import doctest
doctest.testmod(name="malus_law")
| 165
|
"""simple docstring"""
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
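# Sanity-check script (intent inferred from the code below): for each google /
# CompVis diffusion checkpoint, run a single denoising step on a fixed seed and
# compare the first 30 output logits against the reference slices hard-coded below.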
A_ : Dict = HfApi()
A_ : List[str] = {}
# fmt: off
A_ : Dict = torch.tensor([
-0.75_15, -1.68_83, 0.24_20, 0.03_00, 0.63_47, 1.34_33, -1.17_43, -3.74_67,
1.23_42, -2.24_85, 0.46_36, 0.80_76, -0.79_91, 0.39_69, 0.84_98, 0.91_89,
-1.88_87, -3.35_22, 0.76_39, 0.20_40, 0.62_71, -2.71_48, -1.63_16, 3.08_39,
0.31_86, 0.27_21, -0.97_59, -1.24_61, 2.62_57, 1.35_57
])
A_ : List[Any] = torch.tensor([
-2.36_39, -2.53_44, 0.00_54, -0.66_74, 1.59_90, 1.01_58, 0.31_24, -2.14_36,
1.87_95, -2.54_29, -0.15_66, -0.39_73, 1.24_90, 2.64_47, 1.22_83, -0.52_08,
-2.81_54, -3.51_19, 2.38_38, 1.20_33, 1.72_01, -2.12_56, -1.45_76, 2.79_48,
2.42_04, -0.97_52, -1.25_46, 0.80_27, 3.27_58, 3.13_65
])
A_ : str = torch.tensor([
-0.65_31, -0.68_91, -0.31_72, -0.53_75, -0.91_40, -0.53_67, -0.11_75, -0.78_69,
-0.38_08, -0.45_13, -0.20_98, -0.00_83, 0.31_83, 0.51_40, 0.22_47, -0.13_04,
-0.13_02, -0.28_02, -0.20_84, -0.20_25, -0.49_67, -0.48_73, -0.08_61, 0.69_25,
0.02_50, 0.12_90, -0.15_43, 0.63_16, 1.04_60, 1.49_43
])
A_ : List[Any] = torch.tensor([
0.09_11, 0.11_07, 0.01_82, 0.04_35, -0.08_05, -0.06_08, 0.03_81, 0.21_72,
-0.02_80, 0.13_27, -0.02_99, -0.02_55, -0.00_50, -0.11_70, -0.10_46, 0.03_09,
0.13_67, 0.17_28, -0.05_33, -0.07_48, -0.05_34, 0.16_24, 0.03_84, -0.18_05,
-0.07_07, 0.06_42, 0.02_20, -0.01_34, -0.13_33, -0.15_05
])
A_ : Tuple = torch.tensor([
0.13_21, 0.13_37, 0.04_40, 0.06_22, -0.05_91, -0.03_70, 0.05_03, 0.21_33,
-0.01_77, 0.14_15, -0.01_16, -0.01_12, 0.00_44, -0.09_80, -0.07_89, 0.03_95,
0.15_02, 0.17_85, -0.04_88, -0.05_14, -0.04_04, 0.15_39, 0.04_54, -0.15_59,
-0.06_65, 0.06_59, 0.03_83, -0.00_05, -0.12_66, -0.13_86
])
A_ : List[str] = torch.tensor([
0.11_54, 0.12_18, 0.03_07, 0.05_26, -0.07_11, -0.05_41, 0.03_66, 0.20_78,
-0.02_67, 0.13_17, -0.02_26, -0.01_93, -0.00_14, -0.10_55, -0.09_02, 0.03_30,
0.13_91, 0.17_09, -0.05_62, -0.06_93, -0.05_60, 0.14_82, 0.03_81, -0.16_83,
-0.06_81, 0.06_61, 0.03_31, -0.00_46, -0.12_68, -0.14_31
])
A_ : List[Any] = torch.tensor([
0.11_92, 0.12_40, 0.04_14, 0.06_06, -0.05_57, -0.04_12, 0.04_30, 0.20_42,
-0.02_00, 0.13_85, -0.01_15, -0.01_32, 0.00_17, -0.09_65, -0.08_02, 0.03_98,
0.14_33, 0.17_47, -0.04_58, -0.05_33, -0.04_07, 0.15_45, 0.04_19, -0.15_74,
-0.06_45, 0.06_26, 0.03_41, -0.00_10, -0.11_99, -0.13_90
])
A_ : Dict = torch.tensor([
0.10_75, 0.10_74, 0.02_05, 0.04_31, -0.07_74, -0.06_07, 0.02_98, 0.20_42,
-0.03_20, 0.12_67, -0.02_81, -0.02_50, -0.00_64, -0.10_91, -0.09_46, 0.02_90,
0.13_28, 0.16_50, -0.05_80, -0.07_38, -0.05_86, 0.14_40, 0.03_37, -0.17_46,
-0.07_12, 0.06_05, 0.02_50, -0.00_99, -0.13_16, -0.14_73
])
A_ : Tuple = torch.tensor([
-1.45_72, -2.04_81, -0.04_14, -0.60_05, 1.41_36, 0.58_48, 0.40_28, -2.73_30,
1.22_12, -2.12_28, 0.21_55, 0.40_39, 0.76_62, 2.05_35, 0.74_77, -0.32_43,
-2.17_58, -2.76_48, 1.69_47, 0.70_26, 1.23_38, -1.60_78, -0.86_82, 2.28_10,
1.85_74, -0.57_18, -0.55_86, -0.01_86, 2.34_15, 2.12_51])
A_ : str = torch.tensor([
-1.36_90, -1.97_20, -0.40_90, -0.69_66, 1.46_60, 0.99_38, -0.13_85, -2.73_24,
0.77_36, -1.89_17, 0.29_23, 0.42_93, 0.16_93, 1.41_12, 1.18_87, -0.31_81,
-2.21_60, -2.63_81, 1.31_70, 0.81_63, 0.92_40, -1.65_44, -0.60_99, 2.52_59,
1.64_30, -0.90_90, -0.93_92, -0.01_26, 2.42_68, 2.32_66
])
A_ : str = torch.tensor([
-1.35_25, -1.96_28, -0.39_56, -0.68_60, 1.46_64, 1.00_14, -0.12_59, -2.72_12,
0.77_72, -1.88_11, 0.29_96, 0.43_88, 0.17_04, 1.40_29, 1.17_01, -0.30_27,
-2.20_53, -2.62_87, 1.33_50, 0.81_31, 0.92_74, -1.62_92, -0.60_98, 2.51_31,
1.65_05, -0.89_58, -0.92_98, -0.01_51, 2.42_57, 2.33_55
])
A_ : int = torch.tensor([
-2.05_85, -2.78_97, -0.28_50, -0.89_40, 1.90_52, 0.57_02, 0.63_45, -3.89_59,
1.59_32, -3.23_19, 0.19_74, 0.02_87, 1.75_66, 2.65_43, 0.83_87, -0.53_51,
-3.27_36, -4.33_75, 2.90_29, 1.63_90, 1.46_40, -2.17_01, -1.90_13, 2.93_41,
3.49_81, -0.62_55, -1.16_44, -0.15_91, 3.70_97, 3.20_66
])
A_ : int = torch.tensor([
-2.31_39, -2.55_94, -0.01_97, -0.67_85, 1.70_01, 1.16_06, 0.30_75, -2.17_40,
1.80_71, -2.56_30, -0.09_26, -0.38_11, 1.21_16, 2.62_46, 1.27_31, -0.53_98,
-2.81_53, -3.61_40, 2.38_93, 1.32_62, 1.62_58, -2.18_56, -1.32_67, 2.83_95,
2.37_79, -1.06_23, -1.24_68, 0.89_59, 3.33_67, 3.22_43
])
A_ : str = torch.tensor([
-2.06_28, -2.76_67, -0.20_89, -0.82_63, 2.05_39, 0.59_92, 0.64_95, -3.83_36,
1.60_25, -3.28_17, 0.17_21, -0.06_33, 1.75_16, 2.70_39, 0.81_00, -0.59_08,
-3.21_13, -4.43_43, 2.92_57, 1.36_32, 1.55_62, -2.14_89, -1.98_94, 3.05_60,
3.33_96, -0.73_28, -1.04_17, 0.03_83, 3.70_93, 3.23_43
])
A_ : Optional[int] = torch.tensor([
-1.45_74, -2.05_69, -0.04_73, -0.61_17, 1.40_18, 0.57_69, 0.41_29, -2.73_44,
1.22_41, -2.13_97, 0.20_00, 0.39_37, 0.76_16, 2.04_53, 0.73_24, -0.33_91,
-2.17_46, -2.77_44, 1.69_63, 0.69_21, 1.21_87, -1.61_72, -0.88_77, 2.24_39,
1.84_71, -0.58_39, -0.56_05, -0.04_64, 2.32_50, 2.12_19
])
# fmt: on
A_ : List[str] = api.list_models(filter="diffusers")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
A_ : Dict = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
print(F'Started running {mod.modelId}!!!')
if mod.modelId.startswith("CompVis"):
A_ : int = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet")
else:
A_ : Optional[int] = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
A_ : Any = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
A_ : int = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
A_ : Optional[int] = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1E-3
)
print(F'{mod.modelId} has passed successfully!!!')
| 165
| 1
|
from ..utils import DummyObject, requires_backends
class snake_case_ (metaclass=lowerCamelCase_ ):
UpperCAmelCase__ : Optional[int] = ['''note_seq''']
def __init__( self :int ,*__snake_case :List[str] ,**__snake_case :Union[str, Any] ) -> Optional[Any]:
requires_backends(self ,['note_seq'] )
@classmethod
def lowerCamelCase__( cls :Optional[int] ,*__snake_case :Dict ,**__snake_case :Optional[int] ) -> List[Any]:
requires_backends(cls ,['note_seq'] )
@classmethod
def lowerCamelCase__( cls :int ,*__snake_case :List[Any] ,**__snake_case :Dict ) -> List[str]:
requires_backends(cls ,['note_seq'] )
| 354
|
snake_case : str = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
10: '''a''',
11: '''b''',
12: '''c''',
13: '''d''',
14: '''e''',
15: '''f''',
}
def __lowercase ( __lowerCAmelCase : float ):
assert type(__lowerCAmelCase ) in (int, float) and decimal == int(__lowerCAmelCase )
a__ = int(__lowerCAmelCase )
a__ = ''
a__ = False
if decimal < 0:
a__ = True
decimal *= -1
while decimal > 0:
a__ , a__ = divmod(__lowerCAmelCase , 1_6 )
a__ = values[remainder] + hexadecimal
a__ = '0x' + hexadecimal
if negative:
a__ = '-' + hexadecimal
return hexadecimal
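# Illustrative expectations, mirroring Python's built-in hex() (the name
# decimal_to_hexadecimal is the assumed original of the def above):
#   decimal_to_hexadecimal(255) == hex(255) == '0xff'
#   decimal_to_hexadecimal(-42) == hex(-42) == '-0x2a'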
if __name__ == "__main__":
import doctest
doctest.testmod()
| 109
| 0
|
'''simple docstring'''
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase (lowercase_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = FunnelTokenizer
lowerCAmelCase__ : Tuple = FunnelTokenizerFast
lowerCAmelCase__ : int = True
lowerCAmelCase__ : Tuple = True
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
super().setUp()
lowercase__ = [
'''<unk>''',
'''<cls>''',
'''<sep>''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def UpperCamelCase__ (self : int , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
return FunnelTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase__ (self : Optional[int] , **UpperCamelCase : List[str] ):
'''simple docstring'''
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : Optional[int] ):
'''simple docstring'''
lowercase__ = '''UNwant\u00E9d,running'''
lowercase__ = '''unwanted, running'''
return input_text, output_text
def UpperCamelCase__ (self : str ):
'''simple docstring'''
lowercase__ = self.tokenizer_class(self.vocab_file )
lowercase__ = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(UpperCamelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [7, 4, 5, 10, 8, 9] )
def UpperCamelCase__ (self : Dict ):
'''simple docstring'''
lowercase__ = self.get_tokenizers(do_lower_case=UpperCamelCase )
for tokenizer in tokenizers:
lowercase__ = tokenizer('''UNwant\u00E9d,running''' )
lowercase__ = len(inputs['''input_ids'''] ) - 1
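            # Funnel gives the leading <cls> token its own token type id (2),
            # unlike BERT's 0/1 segment ids.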
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len )
lowercase__ = tokenizer('''UNwant\u00E9d,running''' , '''UNwant\u00E9d,running''' )
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len + [1] * sentence_len )
| 2
|
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
lowerCamelCase : Any = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='relu')
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(32, (3, 3), activation='relu'))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='relu'))
classifier.add(layers.Dense(units=1, activation='sigmoid'))
# Compiling the CNN
classifier.compile(
optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
lowerCamelCase : Optional[Any] = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
lowerCamelCase : Any = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
lowerCamelCase : List[Any] = train_datagen.flow_from_directory(
'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary'
)
lowerCamelCase : List[str] = test_datagen.flow_from_directory(
'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary'
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save('cnn.h5')
# Part 3 - Making new predictions
lowerCamelCase : List[str] = tf.keras.preprocessing.image.load_img(
'dataset/single_prediction/image.png', target_size=(64, 64)
)
lowerCamelCase : Optional[int] = tf.keras.preprocessing.image.img_to_array(test_image)
lowerCamelCase : str = np.expand_dims(test_image, axis=0)
lowerCamelCase : List[str] = classifier.predict(test_image)
# training_set.class_indices
# the sigmoid output is a probability in (0, 1), so threshold at 0.5 instead of
# testing for exact equality with 0 or 1
if result[0][0] <= 0.5:
    lowerCamelCase : Any = 'Normal'
else:
    lowerCamelCase : Any = 'Abnormality detected'
| 2
| 1
|
class _snake_case :
def __init__( self , _lowerCamelCase ):
a :Optional[Any] = size
a :Dict = [0] * size
a :List[str] = [0] * size
@staticmethod
def SCREAMING_SNAKE_CASE__ ( _lowerCamelCase ):
return index | (index + 1)
@staticmethod
def SCREAMING_SNAKE_CASE__ ( _lowerCamelCase ):
return (index & (index + 1)) - 1
    def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase ):
        a :str = value
        while index < self.size:
            a :List[Any] = self.get_prev(_lowerCamelCase ) + 1
            if current_left_border == index:
                a :str = value
            else:
                # an assignment may lower a stored value, so recompute this node's
                # maximum from the raw array entry and the already-updated sub-range
                a :Optional[Any] = max(self.arr[index] , self.query(current_left_border , index ) )
            a :List[Any] = self.get_next(_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase ):
        right -= 1  # because `right` is exclusive
a :Dict = 0
while left <= right:
a :Tuple = self.get_prev(_lowerCamelCase )
if left <= current_left:
a :Any = max(_lowerCamelCase , self.tree[right] )
a :Union[str, Any] = current_left
else:
a :Union[str, Any] = max(_lowerCamelCase , self.arr[right] )
right -= 1
return result
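# A small illustrative session (update/query are the assumed original names of
# the methods above; indices are 0-based, and query's right bound is exclusive):
#   tree = _snake_case(5)
#   tree.update(1, 7); tree.update(3, 4)
#   tree.query(0, 4)  # -> 7
#   tree.query(2, 4)  # -> 4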
if __name__ == "__main__":
import doctest
doctest.testmod()
| 281
|
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def __lowerCamelCase ( UpperCAmelCase_ : Optional[int]=32 , UpperCAmelCase_ : Any=10 , UpperCAmelCase_ : Any=100 , UpperCAmelCase_ : List[str]=1026 , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : str="data/tokenized_stories_train_wikitext103.jbl" , UpperCAmelCase_ : List[Any]="igf_context_pairs.jbl" , ):
"""simple docstring"""
set_seed(3 )
# generate train_data and objective_set
a , a :Optional[int] = generate_datasets(
UpperCAmelCase_ , UpperCAmelCase_ , number=UpperCAmelCase_ , min_len=1026 , trim=UpperCAmelCase_ )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
a :str = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
# load pretrained model
a :str = load_gpta('''gpt2''' ).to(UpperCAmelCase_ )
print('''computing perplexity on objective set''' )
a :Dict = compute_perplexity(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ).item()
print('''perplexity on objective set:''' , UpperCAmelCase_ )
# collect igf pairs and save to file demo.jbl
collect_objective_set(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def __lowerCamelCase ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str=15 , UpperCAmelCase_ : Optional[Any]=128 , UpperCAmelCase_ : List[Any]=100 , UpperCAmelCase_ : List[str]="igf_model.pt" , ):
"""simple docstring"""
set_seed(42 )
# Load pre-trained model
a :Tuple = GPTaLMHeadModel.from_pretrained('''gpt2''' )
# Initialize secondary learner to use embedding weights of model
a :List[str] = SecondaryLearner(UpperCAmelCase_ )
# Train secondary learner
a :List[str] = train_secondary_learner(
UpperCAmelCase_ , UpperCAmelCase_ , max_epochs=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , eval_freq=100 , igf_model_path=UpperCAmelCase_ , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def __lowerCamelCase ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any]=32 , UpperCAmelCase_ : List[str]=1000 , UpperCAmelCase_ : Union[str, Any]=16 , UpperCAmelCase_ : Any=1.0 , UpperCAmelCase_ : Optional[int]=recopy_gpta , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Tuple=10 , UpperCAmelCase_ : Any="gpt2_finetuned.pt" , ):
"""simple docstring"""
a :Optional[Any] = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
a :Optional[Any] = RandomSampler(UpperCAmelCase_ )
a :Union[str, Any] = DataLoader(UpperCAmelCase_ , sampler=UpperCAmelCase_ )
a :List[str] = max_steps // (len(UpperCAmelCase_ )) + 1
a :Tuple = 0
a :int = torch.zeros((1, context_len) , dtype=torch.long , device=UpperCAmelCase_ )
a , a , a :str = recopy_model(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
model.train()
if secondary_learner is not None:
secondary_learner.to(UpperCAmelCase_ )
secondary_learner.eval()
a :Optional[Any] = []
a :Union[str, Any] = 0
a :Optional[Any] = []
a :Tuple = []
# Compute the performance of the transformer model at the beginning
a :Any = compute_perplexity(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
test_perps.append(UpperCAmelCase_ )
print('''Test perplexity, step''' , UpperCAmelCase_ , ''':''' , UpperCAmelCase_ )
for epoch in range(int(UpperCAmelCase_ ) ):
for step, example in enumerate(UpperCAmelCase_ ):
torch.cuda.empty_cache()
a :Tuple = random.randint(0 , example.size(2 ) - context_len - 1 )
a :Optional[int] = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
a :Optional[int] = model(UpperCAmelCase_ , labels=UpperCAmelCase_ )
a :int = True
if secondary_learner is not None:
a :Tuple = secondary_learner.forward(
torch.tensor(UpperCAmelCase_ , dtype=torch.long , device=UpperCAmelCase_ ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(UpperCAmelCase_ ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
a :List[str] = -1
if predicted_q < threshold:
a :Tuple = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
a :Any = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
a :Tuple = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
a :Dict = compute_perplexity(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
test_perps.append(UpperCAmelCase_ )
print('''Test perplexity, step''' , UpperCAmelCase_ , ''':''' , UpperCAmelCase_ )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , UpperCAmelCase_ )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def __lowerCamelCase ( ):
"""simple docstring"""
a :Union[str, Any] = argparse.ArgumentParser(description='''Fine-tune a transformer model with IGF on a language modeling task''' )
# Required parameters
parser.add_argument(
'''--data_dir''' , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help='''The input data dir. Should contain data files for WikiText.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--data_file''' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help=(
'''A jbl file containing tokenized data which can be split as objective dataset, '''
'''train_dataset and test_dataset.'''
) , )
parser.add_argument(
'''--igf_data_file''' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help='''A jbl file containing the context and information gain pairs to train secondary learner.''' , )
parser.add_argument(
'''--output_dir''' , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help='''The output directory where the final fine-tuned model is stored.''' , )
parser.add_argument(
'''--tokenizer_name''' , default=UpperCAmelCase_ , type=UpperCAmelCase_ , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument('''--seed''' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help='''A seed for reproducible training.''' )
parser.add_argument(
'''--context_len''' , default=32 , type=UpperCAmelCase_ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--size_objective_set''' , default=100 , type=UpperCAmelCase_ , help='''number of articles that are long enough to be used as our objective set''' , )
parser.add_argument(
'''--eval_freq''' , default=100 , type=UpperCAmelCase_ , help='''secondary model evaluation is triggered at eval_freq''' )
parser.add_argument('''--max_steps''' , default=1000 , type=UpperCAmelCase_ , help='''To calculate training epochs''' )
parser.add_argument(
'''--secondary_learner_batch_size''' , default=128 , type=UpperCAmelCase_ , help='''batch size of training data for secondary learner''' , )
parser.add_argument(
'''--batch_size''' , default=16 , type=UpperCAmelCase_ , help='''batch size of training data of language model(gpt2) ''' )
parser.add_argument(
'''--eval_interval''' , default=10 , type=UpperCAmelCase_ , help=(
'''decay the selectivity of our secondary learner filter from'''
'''1 standard deviation above average to 1 below average after 10 batches'''
) , )
parser.add_argument(
'''--number''' , default=100 , type=UpperCAmelCase_ , help='''The number of examples split to be used as objective_set/test_data''' )
parser.add_argument(
'''--min_len''' , default=1026 , type=UpperCAmelCase_ , help='''The minimum length of the article to be used as objective set''' )
parser.add_argument(
'''--secondary_learner_max_epochs''' , default=15 , type=UpperCAmelCase_ , help='''number of epochs to train secondary learner''' )
parser.add_argument('''--trim''' , default=UpperCAmelCase_ , type=UpperCAmelCase_ , help='''truncate the example if it exceeds context length''' )
parser.add_argument(
'''--threshold''' , default=1.0 , type=UpperCAmelCase_ , help=(
'''The threshold value used by secondary learner to filter the train_data and allow only'''
''' informative data as input to the model'''
) , )
parser.add_argument('''--finetuned_model_name''' , default='''gpt2_finetuned.pt''' , type=UpperCAmelCase_ , help='''finetuned_model_name''' )
parser.add_argument(
'''--recopy_model''' , default=UpperCAmelCase_ , type=UpperCAmelCase_ , help='''Reset the model to the original pretrained GPT-2 weights after each iteration''' , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=UpperCAmelCase_ , data_file='''data/tokenized_stories_train_wikitext103.jbl''' , igf_data_file='''igf_context_pairs.jbl''' , )
# Load train data for secondary learner
a :Union[str, Any] = joblib.load('''data/IGF_values.jbl''' )
# Train secondary learner
a :Any = training_secondary_learner(
UpperCAmelCase_ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='''igf_model.pt''' , )
# load pretrained gpt2 model
a :Any = GPTaLMHeadModel.from_pretrained('''gpt2''' )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
a , a :Union[str, Any] = generate_datasets(
context_len=32 , file='''data/tokenized_stories_train_wikitext103.jbl''' , number=100 , min_len=1026 , trim=UpperCAmelCase_ )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=UpperCAmelCase_ , secondary_learner=UpperCAmelCase_ , eval_interval=10 , finetuned_model_name='''gpt2_finetuned.pt''' , )
if __name__ == "__main__":
main()
| 281
| 1
|
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class SCREAMING_SNAKE_CASE__ (__snake_case ):
__lowerCamelCase : torch.FloatTensor
__lowerCamelCase : Optional[torch.FloatTensor] = None
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ):
    '''simple docstring'''
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(f"""Unsupported alpha_transform_type: {alpha_transform_type}""" )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
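# Illustrative note (added; not part of the original module): each beta above is
# 1 - alpha_bar(t2) / alpha_bar(t1), capped at max_beta, so for the default
# cosine transform the values grow monotonically toward max_beta, e.g.
#   betas = betas_for_alpha_bar(1000)   # float32 tensor of shape (1000,)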
class UnCLIPScheduler(SchedulerMixin , ConfigMixin ):
    @register_to_config
    def __init__( self , num_train_timesteps = 1000 , variance_type = "fixed_small_log" , clip_sample = True , clip_sample_range = 1.0 , prediction_type = "epsilon" , beta_schedule = "squaredcos_cap_v2" , ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError('UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'')
        self.betas = betas_for_alpha_bar(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0)
        self.one = torch.tensor(1.0)
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0 , num_train_timesteps)[::-1].copy())
        self.variance_type = variance_type
    def scale_model_input( self , sample , timestep = None):
        return sample
    def set_timesteps( self , num_inference_steps , device = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0 , num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance( self , t , prev_timestep=None , predicted_variance=None , variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance , min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step( self , model_output , timestep , sample , prev_timestep = None , generator=None , return_dict = True , ):
        t = timestep
        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output , predicted_variance = torch.split(model_output , sample.shape[1] , dim=1)
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
                ' for the UnCLIPScheduler.')
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample , -self.config.clip_sample_range , self.config.clip_sample_range)
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape , dtype=model_output.dtype , generator=generator , device=model_output.device)
            variance = self._get_variance(
                t , predicted_variance=predicted_variance , prev_timestep=prev_timestep , )
            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
                    ' for the UnCLIPScheduler.')
            variance = variance * variance_noise
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample,)
        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample , pred_original_sample=pred_original_sample)
    def add_noise( self , original_samples , noise , timesteps , ):
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)
        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
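# Minimal usage sketch (added for illustration; not part of the original file).
# A random tensor stands in for a real UNet's noise prediction.
if __name__ == "__main__":
    scheduler = UnCLIPScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        model_output = torch.randn(1, 3, 8, 8)  # stand-in for unet(sample, t)
        sample = scheduler.step(model_output, t, sample).prev_sample
    print(sample.shape)  # torch.Size([1, 3, 8, 8])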
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact( artifact_path , targets ):
    '''simple docstring'''
    selected_warnings = set()
    buffer = []
    def parse_line(fp ):
        for line in fp:
            if isinstance(line , bytes ):
                line = line.decode('UTF-8' )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(' ' ):
# process a single warning and move it to `selected_warnings`.
                if len(buffer ) > 0:
                    warning = '\n'.join(buffer )
                    # Only keep the warnings specified in `targets`
                    if any(f""": {x}: """ in warning for x in targets ):
                        selected_warnings.add(warning )
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line )
    if from_gh:
        for filename in os.listdir(artifact_path ):
            file_path = os.path.join(artifact_path , filename )
            if not os.path.isdir(file_path ):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path ) as fp:
                    parse_line(fp )
    else:
        try:
            with zipfile.ZipFile(artifact_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename ) as fp:
                            parse_line(fp )
except Exception:
logger.warning(
f"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""" )
return selected_warnings
def extract_warnings( artifact_dir , targets ):
    '''simple docstring'''
    selected_warnings = set()
    paths = [os.path.join(artifact_dir , p ) for p in os.listdir(artifact_dir ) if (p.endswith('.zip' ) or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p , targets ) )
return selected_warnings
if __name__ == "__main__":
    def list_str( values ):
        '''simple docstring'''
        return values.split(',' )
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
# optional parameters
parser.add_argument(
'''--targets''',
default='''DeprecationWarning,UserWarning,FutureWarning''',
type=list_str,
help='''Comma-separated list of target warning(s) which we want to extract.''',
)
parser.add_argument(
'''--from_gh''',
action='''store_true''',
help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('''=''' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
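# Example invocation (added; the script filename is an assumption):
#   python extract_warnings.py --workflow_run_id 1234567890 --output_dir ./warnings \
#       --token <token with actions:read> --targets DeprecationWarning,UserWarning
# The matching warnings end up in <output_dir>/selected_warnings.json.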
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds( train_file : str , eval_file : str , test_file : str , tokenizer : PreTrainedTokenizer , label_column_id : int , max_seq_length : Optional[int] = None , ):
'''simple docstring'''
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset("""csv""" , data_files=files )
    features_name = list(ds[list(files.keys() )[0]].features.keys() )
    label_name = features_name.pop(label_column_id )
    label_list = list(set(ds[list(files.keys() )[0]][label_name] ) )
    label2id = {label: i for i, label in enumerate(label_list )}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name ) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]] , truncation=True , max_length=max_seq_length , padding="""max_length""" ) , batched=True , )
    elif len(features_name ) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]) , truncation=True , max_length=max_seq_length , padding="""max_length""" , ) , batched=True , )
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
    return train_ds, val_ds, test_ds, label2id
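# Note (added): get_tfds expects CSV files with a header row. `label_column_id`
# picks the label column; the remaining column(s) are tokenized as a single
# sentence (one text column) or as a sentence pair (two text columns).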
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    '''simple docstring'''
    label_column_id: int = field(metadata={"help": "Which column contains the label"} )
    train_file: str = field(default=None , metadata={"help": "The path of the training file"} )
    dev_file: Optional[str] = field(default=None , metadata={"help": "The path of the development file"} )
    test_file: Optional[str] = field(default=None , metadata={"help": "The path of the test file"} )
    max_seq_length: int = field(
        default=128 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"} )
@dataclass
class ModelArguments:
    '''simple docstring'''
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    use_fast: bool = field(default=False , metadata={"help": "Set this flag to use fast tokenization."} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(
F'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '''
F'''16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    train_dataset , eval_dataset , test_ds , label2id = get_tfds(
        train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=tokenizer , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(label2id ) , label2id=label2id , id2label={id: label for label, id in label2id.items()} , finetuning_task="""text-classification""" , cache_dir=model_args.cache_dir , )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path , from_pt=bool(""".bin""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    def compute_metrics(p : EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": (preds == p.label_ids).mean()}
    # Initialize our Trainer
    trainer = TFTrainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , )
    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , """eval_results.txt""" )
        with open(output_eval_file , """w""" ) as writer:
            logger.info("""***** Eval results *****""" )
            for key, value in result.items():
                logger.info(F''' {key} = {value}''' )
                writer.write(F'''{key} = {value}\n''' )
            results.update(result )
return results
if __name__ == "__main__":
main()
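# Example invocation (added; the script filename is an assumption):
#   python run_tf_text_classification.py --train_file train.csv --dev_file dev.csv \
#       --label_column_id 0 --model_name_or_path bert-base-cased \
#       --output_dir ./out --do_train --do_eval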
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ['''small''', '''medium''', '''large''']
OLD_KEY = '''lm_head.decoder.weight'''
NEW_KEY = '''lm_head.weight'''
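# Note (added): older DialoGPT checkpoints store the LM head weight under
# "lm_head.decoder.weight", while the GPT-2 code in transformers expects
# "lm_head.weight"; the conversion below therefore only renames that single key.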
def convert_dialogpt_checkpoint(checkpoint_path : str , pytorch_dump_folder_path : str ):
    '''simple docstring'''
    d = torch.load(checkpoint_path )
    d[NEW_KEY] = d.pop(OLD_KEY )
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    torch.save(d , os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME ) )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, F"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = F"./DialoGPT-{MODEL}"
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
'''simple docstring'''
import functools
def mincost_tickets(days , costs ) -> int:
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError('The parameter days should be a list of integers' )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError('The parameter costs should be a list of three integers' )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError('All days elements should be greater than 0' )
    if max(days ) >= 366:
        raise ValueError('All days elements should be less than 366' )
    days_set = set(days )
@functools.cache
def dynamic_programming(lowerCAmelCase_ ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
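# Worked example (added): mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11,
# i.e. a 1-day pass on day 1, a 7-day pass covering days 4-8, and another
# 1-day pass on day 20 (2 + 7 + 2 = 11).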
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''AI-Sweden/gpt-sw3-126m''': 2048,
'''AI-Sweden/gpt-sw3-350m''': 2048,
'''AI-Sweden/gpt-sw3-1.6b''': 2048,
'''AI-Sweden/gpt-sw3-6.7b''': 2048,
'''AI-Sweden/gpt-sw3-20b''': 2048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self ,vocab_file ,do_lower_case=False ,remove_space=False ,keep_accents=False ,pad_token=None ,unk_token=None ,eos_token=None ,bos_token=None ,sp_model_kwargs = None ,**kwargs ,) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        name_or_path = kwargs.get("""name_or_path""" )
        if name_or_path is None:
            logger.warning(
                """name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"""
                """ you are testing the model, this can safely be ignored""" )
            name_or_path = """None"""
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = """<|endoftext|>""" if eos_token is None else eos_token
        unk_token = """<unk>""" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = """<pad>""" if pad_token is None else pad_token
            bos_token = """<s>""" if bos_token is None else bos_token
        super().__init__(
            do_lower_case=do_lower_case ,remove_space=remove_space ,keep_accents=keep_accents ,bos_token=bos_token ,eos_token=eos_token ,unk_token=unk_token ,pad_token=pad_token ,sp_model_kwargs=self.sp_model_kwargs ,**kwargs ,)
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """"""}
        # fmt: on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            F"""[{''.join(map(chr ,list(range(0 ,9 ) ) + list(range(11 ,32 ) ) + list(range(127 ,160 ) ) + [160, 173, 8203] ) )}]""" )
    def __getstate__( self ) -> Any:
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state
    def __setstate__( self ,d ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self ,"""sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size( self ) -> int:
        return len(self.sp_model )
    def preprocess_text( self ,text ) -> str:
        text = self.non_printing_characters_re.sub("""""" ,text )
        # Normalize whitespaces
        text = """""".join([char if char not in self.whitespaces else """ """ for char in text] )
        # NFC Unicode normalization
        text = unicodedata.normalize("""NFC""" ,text )
        return text
    def _tokenize( self ,text ,**kwargs ) -> List[str]:
        text = self.preprocess_text(text )
        return self.sp_model.encode(text ,out_type=str )
    def _convert_token_to_id( self ,token ) -> int:
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self ,index ) -> str:
        return self.sp_model.IdToPiece(index )
    @staticmethod
    def clean_up_tokenization( out_string ) -> str:
        return out_string
    def convert_tokens_to_string( self ,tokens ) -> str:
        current_sub_tokens = []
        out_string = """"""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string
    def get_vocab( self ) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def save_vocabulary( self ,save_directory ,filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file ,out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file ,"""wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def encode_fast( self ,text ,return_tensors = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        if isinstance(text ,str ):
            text = self.preprocess_text(text )
            token_ids = self.sp_model.encode(text )
        else:
            text = [self.preprocess_text(t ) for t in text]
            token_ids = self.sp_model.encode(text )
        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids )
        return token_ids
    def decode_fast( self ,token_ids ) -> str:
        return self.sp_model.decode(token_ids )
    def _build_conversation_input_ids( self ,conversation ) -> List[int]:
        all_responses = [F"""User: {text}""" if is_user else F"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
        prompt = (
            F"""{self.eos_token}{self.bos_token}""" + F"""{self.bos_token}""".join(all_responses ) + F"""{self.bos_token}Bot:"""
        )
        return self.encode(text=prompt )
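# Usage sketch (added; model id taken from the pretrained map above):
#   tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
#   ids = tokenizer("Svenska är kul!")["input_ids"]
#   text = tokenizer.decode_fast(ids)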
"""simple docstring"""
def min_path_sum(grid ) -> int:
    '''simple docstring'''
    if not grid or not grid[0]:
        raise TypeError('The grid does not contain the appropriate information' )
    for cell_n in range(1 , len(grid[0] ) ):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1 , len(grid ) ):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row , row_above )
        row_above = grid[row_n]
    return grid[-1][-1]
def fill_row(current_row , row_above ) -> list:
    '''simple docstring'''
    current_row[0] += row_above[0]
    for cell_n in range(1 , len(current_row ) ):
        current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
    return current_row
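# Worked example (added): min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7
# via the path 1 -> 3 -> 1 -> 1 -> 1 (moves restricted to right/down).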
if __name__ == "__main__":
import doctest
doctest.testmod()
import d4rl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
"n_samples": 64,
"horizon": 32,
"num_inference_steps": 20,
"n_guide_steps": 2, # can set to 0 for faster sampling, does not use value network
"scale_grad_by_std": True,
"scale": 0.1,
"eta": 0.0,
"t_grad_cutoff": 2,
"device": "cpu",
}
if __name__ == "__main__":
_UpperCAmelCase : int = "hopper-medium-v2"
_UpperCAmelCase : Tuple = gym.make(env_name)
_UpperCAmelCase : Any = ValueGuidedRLPipeline.from_pretrained(
"bglick13/hopper-medium-v2-value-function-hor32",
env=env,
)
env.seed(0)
_UpperCAmelCase : Optional[Any] = env.reset()
_UpperCAmelCase : Union[str, Any] = 0
_UpperCAmelCase : Union[str, Any] = 0
_UpperCAmelCase : Dict = 1_000
_UpperCAmelCase : Tuple = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
_UpperCAmelCase : int = pipeline(obs, planning_horizon=32)
# execute action in environment
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : List[Any] = env.step(denorm_actions)
_UpperCAmelCase : int = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
F''' {total_score}'''
)
# save observations for rendering
rollout.append(next_observation.copy())
            obs = next_observation
except KeyboardInterrupt:
pass
print(F'''Total reward: {total_reward}''')
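# Note (added): "n_guide_steps" in the config above sets how many value-gradient
# updates are applied to the sampled plan at each denoising step; per the inline
# comment, 0 disables the value network and falls back to unguided (faster) sampling.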
'''simple docstring'''
def average_absolute_deviation(nums : list[int] ) -> float:
    if not nums:  # Makes sure that the list is not empty
        raise ValueError('''List is empty''' )
    average = sum(nums ) / len(nums )  # Calculate the average
    return sum(abs(x - average ) for x in nums ) / len(nums )
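# Worked example (added): average_absolute_deviation([1, 2, 3, 4]) == 1.0,
# since the average is 2.5 and (1.5 + 0.5 + 0.5 + 1.5) / 4 = 1.0.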
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
    "tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/spiece.model''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/fnet-base''': 512,
'''google/fnet-large''': 512,
}
SPIECE_UNDERLINE = '''▁'''
class FNetTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=False , remove_space=True , keep_accents=True , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False )
            if isinstance(mask_token , str )
            else mask_token
        )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
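    # Note (added): like ALBERT, FNet marks the first sequence (with its [CLS]
    # and [SEP]) with segment id 0 and the optional second sequence (plus its
    # [SEP]) with segment id 1, which is what the method above returns.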
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class RwkvConfig(PretrainedConfig ):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}
    def __init__( self , vocab_size=50277 , context_length=1024 , hidden_size=4096 , num_hidden_layers=32 , attention_hidden_size=None , intermediate_size=None , layer_norm_epsilon=1E-5 , bos_token_id=0 , eos_token_id=0 , rescale_every=6 , tie_word_embeddings=False , use_cache=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
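# Usage sketch (added): RwkvConfig() reproduces the defaults above; smaller
# variants just override the sizes, e.g. (hypothetical values)
#   config = RwkvConfig(hidden_size=768, num_hidden_layers=12)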
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure(config ):
'''simple docstring'''
config.addinivalue_line(
"markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" )
config.addinivalue_line(
"markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" )
config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested" )
config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment" )
config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate" )
config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule" )
def pytest_addoption(parser ):
    '''simple docstring'''
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary(terminalreporter ):
    '''simple docstring'''
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("--make-reports" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def pytest_sessionfinish(session , exitstatus ):
    '''simple docstring'''
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")
OutputChecker = doctest.OutputChecker
class CustomOutputChecker(OutputChecker ):
    def check_output(self , want , got , optionflags ):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags )
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
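# Note (added): the IGNORE_RESULT flag registered above lets a doctest accept
# any output for a line, e.g.:
#   >>> call_with_noisy_output()  # doctest: +IGNORE_RESULT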
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def lowerCAmelCase_ ( _snake_case : str = "laptop" ) -> DataFrame:
'''simple docstring'''
__magic_name__ : Tuple = F'''https://www.amazon.in/laptop/s?k={product}'''
__magic_name__ : Dict = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
__magic_name__ : Tuple = BeautifulSoup(requests.get(_snake_case , headers=_snake_case ).text )
# Initialize a Pandas dataframe with the column titles
__magic_name__ : int = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span" , attrs={"class": "a-offscreen"} ).text
            try:
                product_rating = item.find("span" , attrs={"class": "a-icon-alt"} ).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹" ).replace("," , "" ) )
                            - float(product_price.strip("₹" ).replace("," , "" ) )
                        )
                        / float(product_mrp.strip("₹" ).replace("," , "" ) )
                    )
                    * 100 )
            except ValueError:
                discount = float("nan" )
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index )] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
    data_frame.loc[data_frame["Current Price of the product"] == "", "Current Price of the product"] = " "
    data_frame.loc[data_frame["Product Rating"] == "", "Product Rating"] = " "
data_frame.index += 1
return data_frame
if __name__ == "__main__":
snake_case : Any = "headphones"
get_amazon_product_data(product).to_csv(F"Amazon Product Data for {product}.csv")
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'''
''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'''
''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''',
'''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'''
''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'''
''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'''
''' body.''',
'''Amnesty International releases its annual report on the death penalty. The report catalogs the use of'''
''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'''
''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'''
''' punishment.''',
]
TGT = [
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'''
''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'''
''' had informed his Lufthansa training school of an episode of severe depression, airline says .''',
'''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'''
''' Israel and the United States opposed the move, which could open the door to war crimes investigations against'''
''' Israelis .''',
'''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'''
''' death . Organization claims that governments around the world are using the threat of terrorism to advance'''
''' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'''
''' sentences up by 28% .''',
]
def test_disaggregated_scores_are_deterministic():
    """simple docstring"""
    no_aggregation = calculate_rouge(PRED , TGT , bootstrap_aggregation=False , rouge_keys=["""rouge2""", """rougeL"""] )
    assert isinstance(no_aggregation , defaultdict )
    no_aggregation_just_r2 = calculate_rouge(PRED , TGT , bootstrap_aggregation=False , rouge_keys=["""rouge2"""] )
    assert (
        pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["""rouge2"""] ).fmeasure.mean()
    )
def test_newline_cnn_improvement():
    """simple docstring"""
    k = """rougeLsum"""
    score = calculate_rouge(PRED , TGT , newline_sep=True , rouge_keys=[k] )[k]
    score_no_sep = calculate_rouge(PRED , TGT , newline_sep=False , rouge_keys=[k] )[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_the_rest():
    """simple docstring"""
    rouge_keys = ["""rouge1""", """rouge2""", """rougeL"""]
    score_sep = calculate_rouge(PRED , TGT , newline_sep=True , rouge_keys=rouge_keys )
    score_no_sep = calculate_rouge(PRED , TGT , newline_sep=False , rouge_keys=rouge_keys )
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep():
    """simple docstring"""
    pred = [
        """Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
        """Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
    ]
    tgt = [
        """Margot Frank, died in 1945, a month earlier than previously thought.""",
        """Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
        """ the final seconds on board Flight 9525.""",
    ]
    assert calculate_rouge(pred , tgt , newline_sep=True ) == calculate_rouge(pred , tgt , newline_sep=False )
def test_pegasus_newline():
    """simple docstring"""
    pred = [
        """\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
    ]
    tgt = [
        """ Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
    ]
    prev_score = calculate_rouge(pred , tgt , rouge_keys=["""rougeLsum"""] , newline_sep=False )["""rougeLsum"""]
    new_score = calculate_rouge(pred , tgt , rouge_keys=["""rougeLsum"""] )["""rougeLsum"""]
    assert new_score > prev_score
def test_rouge_cli():
    """simple docstring"""
    data_dir = Path("""examples/seq2seq/test_data/wmt_en_ro""" )
    metrics = calculate_rouge_path(data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) )
    assert isinstance(metrics , dict )
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) , bootstrap_aggregation=False )
    assert isinstance(metrics_default_dict , defaultdict )
'''simple docstring'''
import os
def largest_product(grid ):
    """simple docstring"""
    n_columns = len(grid[0] )
    n_rows = len(grid )
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0
    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns ):
        for j in range(n_rows - 3 ):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )
            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )
            max_product = max(
                vert_product , horz_product , lr_diag_product , rl_diag_product )
            if max_product > largest:
                largest = max_product
    return largest
def solution():
    """simple docstring"""
    grid = []
    with open(os.path.dirname(__file__ ) + """/grid.txt""" ) as file:
        for line in file:
            grid.append(line.strip("""\n""" ).split(""" """ ) )
    grid = [[int(i ) for i in grid[j]] for j in range(len(grid ) )]
    return largest_product(grid )
if __name__ == "__main__":
print(solution())
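# Note (added): with the standard 20x20 grid of Project Euler problem 11 in
# grid.txt, solution() returns the known answer 70600674.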
"""simple docstring"""
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput ):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)
@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput ):
    sample: jnp.ndarray
@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module ,FlaxModelMixin ,ConfigMixin ):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights(self , rng: jax.random.KeyArray ) -> FrozenDict:
        '''simple docstring'''
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape , dtype=jnp.float32 )
        timesteps = jnp.ones((1,) , dtype=jnp.int32 )
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.float32 )
        params_rng , dropout_rng = jax.random.split(rng )
        rngs = {"params": params_rng, "dropout": dropout_rng}
        return self.init(rngs , sample , timesteps , encoder_hidden_states )["params"]
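    # Note (added): init_weights above materializes the parameter PyTree by
    # tracing one dummy forward pass (zero sample, unit timestep, zero text
    # embedding); no real data is needed to build the parameter shapes.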
def _SCREAMING_SNAKE_CASE (self : str ) -> Tuple:
'''simple docstring'''
snake_case : str = self.block_out_channels
snake_case : Optional[Any] = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
"At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
snake_case : Tuple = self.num_attention_heads or self.attention_head_dim
# input
snake_case : Tuple = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
snake_case : Union[str, Any] = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
snake_case : Dict = FlaxTimestepEmbedding(snake_case__ , dtype=self.dtype )
snake_case : List[str] = self.only_cross_attention
if isinstance(snake_case__ , snake_case__ ):
snake_case : List[Any] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(snake_case__ , snake_case__ ):
snake_case : List[Any] = (num_attention_heads,) * len(self.down_block_types )
# down
snake_case : List[Any] = []
snake_case : Optional[int] = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
snake_case : List[Any] = output_channel
snake_case : Dict = block_out_channels[i]
snake_case : Optional[Any] = i == len(snake_case__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
snake_case : List[Any] = FlaxCrossAttnDownBlockaD(
in_channels=snake_case__ , out_channels=snake_case__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
snake_case : Union[str, Any] = FlaxDownBlockaD(
in_channels=snake_case__ , out_channels=snake_case__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(snake_case__ )
snake_case : Dict = down_blocks
# mid
snake_case : Optional[int] = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
snake_case : Optional[Any] = []
        reversed_block_out_channels = list(reversed(block_out_channels ) )
        reversed_num_attention_heads = list(reversed(num_attention_heads ) )
        only_cross_attention = list(reversed(only_cross_attention ) )
snake_case : Optional[Any] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
snake_case : Optional[int] = output_channel
snake_case : List[Any] = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1 , len(reversed_block_out_channels ) - 1 )]
            is_final_block = i == len(reversed_block_out_channels ) - 1
if up_block_type == "CrossAttnUpBlock2D":
snake_case : Any = FlaxCrossAttnUpBlockaD(
in_channels=snake_case__ , out_channels=snake_case__ , prev_output_channel=snake_case__ , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
snake_case : Optional[int] = FlaxUpBlockaD(
in_channels=snake_case__ , out_channels=snake_case__ , prev_output_channel=snake_case__ , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(snake_case__ )
snake_case : Optional[int] = output_channel
snake_case : Tuple = up_blocks
# out
snake_case : Optional[int] = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
snake_case : List[str] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__(self : Dict , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Optional[int] , snake_case__ : Tuple=None , snake_case__ : Union[str, Any]=None , snake_case__ : bool = True , snake_case__ : bool = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
'''simple docstring'''
if not isinstance(snake_case__ , jnp.ndarray ):
snake_case : List[Any] = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(snake_case__ , jnp.ndarray ) and len(timesteps.shape ) == 0:
snake_case : Any = timesteps.astype(dtype=jnp.floataa )
snake_case : int = jnp.expand_dims(snake_case__ , 0 )
snake_case : str = self.time_proj(snake_case__ )
snake_case : str = self.time_embedding(snake_case__ )
# 2. pre-process
snake_case : int = jnp.transpose(snake_case__ , (0, 2, 3, 1) )
snake_case : List[Any] = self.conv_in(snake_case__ )
# 3. down
snake_case : Optional[int] = (sample,)
for down_block in self.down_blocks:
if isinstance(snake_case__ , snake_case__ ):
snake_case , snake_case : List[Any] = down_block(snake_case__ , snake_case__ , snake_case__ , deterministic=not train )
else:
snake_case , snake_case : str = down_block(snake_case__ , snake_case__ , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
snake_case : Tuple = ()
for down_block_res_sample, down_block_additional_residual in zip(
snake_case__ , snake_case__ ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
snake_case : Optional[int] = new_down_block_res_samples
# 4. mid
snake_case : Optional[int] = self.mid_block(snake_case__ , snake_case__ , snake_case__ , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
snake_case : int = down_block_res_samples[-(self.layers_per_block + 1) :]
snake_case : Optional[Any] = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(snake_case__ , snake_case__ ):
snake_case : Optional[Any] = up_block(
snake_case__ , temb=snake_case__ , encoder_hidden_states=snake_case__ , res_hidden_states_tuple=snake_case__ , deterministic=not train , )
else:
snake_case : Dict = up_block(snake_case__ , temb=snake_case__ , res_hidden_states_tuple=snake_case__ , deterministic=not train )
# 6. post-process
snake_case : List[str] = self.conv_norm_out(snake_case__ )
snake_case : Any = nn.silu(snake_case__ )
snake_case : Optional[int] = self.conv_out(snake_case__ )
snake_case : Union[str, Any] = jnp.transpose(snake_case__ , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=snake_case__ )
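# A minimal usage sketch for the Flax UNet above. The class and attribute
# names in this snippet are obfuscated, so the shapes below are hard-coded
# from the config defaults (in_channels=4, sample_size=32,
# cross_attention_dim=1280); treat everything here as an assumption.
import jax.numpy as jnp

def unet_forward_sketch(model, params):
    sample = jnp.zeros((1, 4, 32, 32), dtype=jnp.float32)  # NCHW latents, as in __call__
    timesteps = jnp.ones((1,), dtype=jnp.int32)
    encoder_hidden_states = jnp.zeros((1, 77, 1280), dtype=jnp.float32)
    return model.apply({"params": params}, sample, timesteps, encoder_hidden_states)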
| 59
| 0
|
"""simple docstring"""
def _modexpt( base : int , exponent : int , modulo_value : int ) -> int:
    '''simple docstring'''
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x : int = _modexpt(base , exponent // 2 , modulo_value ) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base , exponent - 1 , modulo_value )) % modulo_value
def solution( base : int = 17_77 , exponent : int = 18_55 , digits : int = 8 ) -> int:
    '''simple docstring'''
    result = base
    for _ in range(1 , exponent ):
        result = _modexpt(base , result , 10**digits )
    return result
if __name__ == "__main__":
print(f'''{solution() = }''')
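# The recursive square-and-multiply above is equivalent to Python's built-in
# three-argument pow(); a quick sanity check:
assert _modexpt(17_77, 18_55, 10**8) == pow(17_77, 18_55, 10**8)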
| 356
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
_a : Any= logging.get_logger(__name__)
_a : str= {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_model_names = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f'''funnel-transformer/{name}''': 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f'''funnel-transformer/{name}''': {"do_lower_case": True} for name in _model_names}
class UpperCamelCase ( lowercase ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id : int = 2
def __init__(self : int , _A : Any=None , _A : Union[str, Any]=None , _A : Union[str, Any]=True , _A : List[str]="<unk>" , _A : Any="<sep>" , _A : Dict="<pad>" , _A : Tuple="<cls>" , _A : Dict="<mask>" , _A : Optional[Any]="<s>" , _A : List[Any]="</s>" , _A : Optional[int]=True , _A : Dict=True , _A : Tuple=None , _A : int="##" , **_A : Any , ) -> str:
super().__init__(
_A , tokenizer_file=_A , do_lower_case=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , bos_token=_A , eos_token=_A , clean_text=_A , tokenize_chinese_chars=_A , strip_accents=_A , wordpieces_prefix=_A , **_A , )
__snake_case : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get('lowercase' , _A) != do_lower_case
or normalizer_state.get('strip_accents' , _A) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _A) != tokenize_chinese_chars
):
__snake_case : List[str] = getattr(_A , normalizer_state.pop('type'))
__snake_case : int = do_lower_case
__snake_case : Optional[int] = strip_accents
__snake_case : str = tokenize_chinese_chars
__snake_case : Optional[int] = normalizer_class(**_A)
__snake_case : str = do_lower_case
    def _lowercase (self : Optional[Any] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None) -> List[int]:
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def _lowercase (self : str , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
def _lowercase (self : Tuple , _A : str , _A : Optional[str] = None) -> Tuple[str]:
__snake_case : int = self._tokenizer.model.save(_A , name=_A)
return tuple(_A)
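# Schematic of the special-token layout produced by the two methods above,
# with hypothetical token ids for illustration. Funnel's distinguishing
# detail is the token type id of 2 (cls_token_type_id) at the [CLS] position:
tok_a, tok_b, cls_id, sep_id = [7, 8], [9], 101, 102
input_ids = [cls_id] + tok_a + [sep_id] + tok_b + [sep_id]  # [CLS] a [SEP] b [SEP]
token_type_ids = [2] + [0] * (len(tok_a) + 1) + [1] * (len(tok_b) + 1)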
| 95
| 0
|
def solution( n : int = 1_00 ) -> int:
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1 , n + 1 ):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
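# The brute-force loop above can be cross-checked against the closed forms
# sum(i) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6:
def solution_closed_form(n: int = 1_00) -> int:
    sum_of_ints = n * (n + 1) // 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_of_ints**2 - sum_of_squares

assert solution_closed_form(10) == 2640  # worked example: 55**2 - 385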
| 92
|
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(R'\[(.+?)\]\((https://huggingface\.co/.+?)\)')
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
'DecisionTransformerConfig',
'EncoderDecoderConfig',
'MusicgenConfig',
'RagConfig',
'SpeechEncoderDecoderConfig',
'TimmBackboneConfig',
'VisionEncoderDecoderConfig',
'VisionTextDualEncoderConfig',
'LlamaConfig',
}
def get_checkpoint_from_config_class( config_class ):
    """simple docstring"""
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(config_class )
    checkpoints = _re_checkpoint.findall(config_source )
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith('''/''' ):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f'https://huggingface.co/{ckpt_name}'
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break
    return checkpoint
def check_config_docstrings_have_checkpoints( ):
    """simple docstring"""
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class )
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name )
    if len(configs_without_checkpoint ) > 0:
        message = '''\n'''.join(sorted(configs_without_checkpoint ) )
        raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
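# A small illustration of the checkpoint-extracting regex defined above;
# with two capture groups, findall returns (name, link) tuples:
_demo = "[bert-base-uncased](https://huggingface.co/bert-base-uncased)"
assert _re_checkpoint.findall(_demo) == [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]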
| 110
| 0
|
import numpy as np
SQUARE = [
["""a""", """b""", """c""", """d""", """e"""],
["""f""", """g""", """h""", """i""", """k"""],
["""l""", """m""", """n""", """o""", """p"""],
["""q""", """r""", """s""", """t""", """u"""],
["""v""", """w""", """x""", """y""", """z"""],
]
class lowercase :
def __init__( self ):
        self.SQUARE = np.array(SQUARE )
def a ( self , snake_case ):
snake_case_ , snake_case_ = np.where(letter == self.SQUARE )
snake_case_ = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def a ( self , snake_case , snake_case ):
snake_case_ = self.SQUARE[indexa - 1, indexa - 1]
return letter
def a ( self , snake_case ):
snake_case_ = message.lower()
snake_case_ = message.replace(' ' , '' )
snake_case_ = message.replace('j' , 'i' )
snake_case_ = np.empty((2, len(snake_case )) )
for letter_index in range(len(snake_case ) ):
snake_case_ = self.letter_to_numbers(message[letter_index] )
snake_case_ = numbers[0]
snake_case_ = numbers[1]
snake_case_ = first_step.reshape(2 * len(snake_case ) )
snake_case_ = ''
for numbers_index in range(len(snake_case ) ):
snake_case_ = int(second_step[numbers_index * 2] )
snake_case_ = int(second_step[(numbers_index * 2) + 1] )
snake_case_ = self.numbers_to_letter(snake_case , snake_case )
snake_case_ = encoded_message + letter
return encoded_message
def a ( self , snake_case ):
snake_case_ = message.lower()
        snake_case_ = message.replace(' ' , '' )
snake_case_ = np.empty(2 * len(snake_case ) )
for letter_index in range(len(snake_case ) ):
snake_case_ = self.letter_to_numbers(message[letter_index] )
snake_case_ = numbers[0]
snake_case_ = numbers[1]
snake_case_ = first_step.reshape((2, len(snake_case )) )
snake_case_ = ''
for numbers_index in range(len(snake_case ) ):
snake_case_ = int(second_step[0, numbers_index] )
snake_case_ = int(second_step[1, numbers_index] )
snake_case_ = self.numbers_to_letter(snake_case , snake_case )
snake_case_ = decoded_message + letter
return decoded_message
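# A round-trip usage sketch, assuming the upstream names PolybiusCipher /
# encode / decode (the class and methods above are obfuscated). Note that
# encode folds 'j' into 'i', so words containing 'j' do not round-trip:
square = PolybiusCipher()  # hypothetical name for the class above
assert square.decode(square.encode("testmessage")) == "testmessage"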
| 200
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 200
| 1
|
def simplify( current_set : list[list] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = current_set.copy()
for row_index, row in enumerate(snake_case__ ):
UpperCAmelCase_ : Tuple = row[0]
for column_index, column in enumerate(snake_case__ ):
if magnitude == 0:
UpperCAmelCase_ : List[Any] = column
continue
UpperCAmelCase_ : Tuple = column / magnitude
# Subtract to cancel term
UpperCAmelCase_ : Optional[Any] = current_set[0]
UpperCAmelCase_ : List[str] = [first_row]
UpperCAmelCase_ : str = current_set[1::]
for row in current_set:
UpperCAmelCase_ : int = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(snake_case__ )
continue
for column_index in range(len(snake_case__ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(snake_case__ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
UpperCAmelCase_ : Union[str, Any] = final_set[0]
UpperCAmelCase_ : int = []
UpperCAmelCase_ : str = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
UpperCAmelCase_ : Optional[Any] = simplify(snake_case__ )
for i in range(len(snake_case__ ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , snake_case__ )
UpperCAmelCase_ : List[str] = resultant
return final_set
def solve_simultaneous( equations : list[list] ):
'''simple docstring'''
if len(snake_case__ ) == 0:
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
UpperCAmelCase_ : Union[str, Any] = len(snake_case__ ) + 1
if any(len(snake_case__ ) != _length for item in equations ):
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
for row in equations:
if any(not isinstance(snake_case__ , (int, float) ) for column in row ):
raise ValueError("""solve_simultaneous() requires lists of integers""" )
if len(snake_case__ ) == 1:
return [equations[0][-1] / equations[0][0]]
UpperCAmelCase_ : Dict = equations.copy()
if any(0 in row for row in data_set ):
UpperCAmelCase_ : Any = data_set.copy()
UpperCAmelCase_ : int = []
for row_index, row in enumerate(snake_case__ ):
if 0 not in row:
UpperCAmelCase_ : Optional[int] = data_set.pop(snake_case__ )
break
if not full_row:
raise ValueError("""solve_simultaneous() requires at least 1 full equation""" )
data_set.insert(0 , snake_case__ )
UpperCAmelCase_ : str = data_set.copy()
UpperCAmelCase_ : Union[str, Any] = simplify(snake_case__ )
UpperCAmelCase_ : int = simplified[::-1]
UpperCAmelCase_ : list = []
for row in simplified:
UpperCAmelCase_ : int = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
UpperCAmelCase_ : Dict = row.copy()[: len(snake_case__ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(snake_case__ ) == 0:
solutions.append(0 )
continue
UpperCAmelCase_ : Optional[Any] = temp_row[1::]
UpperCAmelCase_ : Dict = temp_row[::-1]
for column_index, column in enumerate(snake_case__ ):
current_solution -= column * solutions[column_index]
solutions.append(snake_case__ )
UpperCAmelCase_ : str = []
for item in solutions:
final.append(float(round(snake_case__ , 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
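# A worked 2x2 example for solve_simultaneous above; each row is [a, b, c]
# for a*x + b*y = c, and solutions come back in variable order.
# x + y = 3 and 2x - y = 0 give x = 1, y = 2:
assert solve_simultaneous([[1, 1, 3], [2, -1, 0]]) == [1.0, 2.0]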
| 345
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
_snake_case : List[Any] = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
_snake_case : Tuple = 1_92
_snake_case : Any = 7_68
_snake_case : Any = 12
_snake_case : List[Any] = 3
_snake_case : int = [8_00, 13_33]
_snake_case : Tuple = False
elif yolos_name == "yolos_s_dWr":
_snake_case : Tuple = 3_30
_snake_case : List[str] = 14
_snake_case : List[str] = 6
_snake_case : Union[str, Any] = 13_20
elif "yolos_s" in yolos_name:
_snake_case : Union[str, Any] = 3_84
_snake_case : List[str] = 15_36
_snake_case : Any = 12
_snake_case : Optional[int] = 6
elif "yolos_b" in yolos_name:
_snake_case : Dict = [8_00, 13_44]
_snake_case : str = 91
_snake_case : Optional[Any] = """huggingface/label-files"""
_snake_case : str = """coco-detection-id2label.json"""
_snake_case : str = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) )
_snake_case : Union[str, Any] = {int(snake_case__ ): v for k, v in idalabel.items()}
_snake_case : List[str] = idalabel
_snake_case : List[str] = {v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase__ (snake_case__ : dict , snake_case__ : YolosConfig , snake_case__ : bool = False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_snake_case : int = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
_snake_case : Union[str, Any] = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
_snake_case : Any = in_proj_weight[: config.hidden_size, :]
_snake_case : Optional[Any] = in_proj_bias[: config.hidden_size]
_snake_case : Optional[int] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_snake_case : int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_snake_case : Tuple = in_proj_weight[-config.hidden_size :, :]
_snake_case : List[Any] = in_proj_bias[-config.hidden_size :]
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
if "backbone" in name:
_snake_case : str = name.replace("""backbone""" , """vit""" )
if "cls_token" in name:
_snake_case : Union[str, Any] = name.replace("""cls_token""" , """embeddings.cls_token""" )
if "det_token" in name:
_snake_case : str = name.replace("""det_token""" , """embeddings.detection_tokens""" )
if "mid_pos_embed" in name:
_snake_case : str = name.replace("""mid_pos_embed""" , """encoder.mid_position_embeddings""" )
if "pos_embed" in name:
_snake_case : Tuple = name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
_snake_case : str = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "blocks" in name:
_snake_case : str = name.replace("""blocks""" , """encoder.layer""" )
if "attn.proj" in name:
_snake_case : Any = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
_snake_case : str = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
_snake_case : List[str] = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
_snake_case : str = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
_snake_case : List[str] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
_snake_case : int = name.replace("""mlp.fc2""" , """output.dense""" )
if "class_embed" in name:
_snake_case : Union[str, Any] = name.replace("""class_embed""" , """class_labels_classifier""" )
if "bbox_embed" in name:
_snake_case : str = name.replace("""bbox_embed""" , """bbox_predictor""" )
if "vit.norm" in name:
_snake_case : Union[str, Any] = name.replace("""vit.norm""" , """vit.layernorm""" )
return name
def UpperCAmelCase__ (snake_case__ : dict , snake_case__ : YolosForObjectDetection ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_snake_case : List[str] = orig_state_dict.pop(snake_case__ )
if "qkv" in key:
_snake_case : Optional[Any] = key.split(""".""" )
_snake_case : Optional[Any] = int(key_split[2] )
_snake_case : Optional[int] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
_snake_case : str = val[:dim, :]
_snake_case : Optional[Any] = val[
dim : dim * 2, :
]
_snake_case : Optional[Any] = val[-dim:, :]
else:
_snake_case : Dict = val[:dim]
_snake_case : Any = val[dim : dim * 2]
_snake_case : Dict = val[-dim:]
else:
_snake_case : Tuple = val
return orig_state_dict
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : str = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_snake_case : Union[str, Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : str , snake_case__ : str , snake_case__ : bool = False ):
"""simple docstring"""
_snake_case : Optional[Any] = get_yolos_config(snake_case__ )
# load original state_dict
_snake_case : Optional[int] = torch.load(snake_case__ , map_location="""cpu""" )["""model"""]
# load 🤗 model
_snake_case : Optional[Any] = YolosForObjectDetection(snake_case__ )
model.eval()
_snake_case : Optional[Any] = convert_state_dict(snake_case__ , snake_case__ )
model.load_state_dict(snake_case__ )
# Check outputs on an image, prepared by YolosImageProcessor
_snake_case : List[str] = 8_00 if yolos_name != """yolos_ti""" else 5_12
_snake_case : Optional[int] = YolosImageProcessor(format="""coco_detection""" , size=snake_case__ )
_snake_case : Optional[Any] = image_processor(images=prepare_img() , return_tensors="""pt""" )
_snake_case : Optional[Any] = model(**snake_case__ )
_snake_case , _snake_case : Optional[int] = outputs.logits, outputs.pred_boxes
_snake_case , _snake_case : Dict = None, None
if yolos_name == "yolos_ti":
_snake_case : Optional[Any] = torch.tensor(
[[-39.50_22, -11.98_20, -17.68_88], [-29.95_74, -9.97_69, -17.76_91], [-42.32_81, -20.72_00, -30.62_94]] )
_snake_case : Tuple = torch.tensor(
[[0.40_21, 0.08_36, 0.79_79], [0.01_84, 0.26_09, 0.03_64], [0.17_81, 0.20_04, 0.20_95]] )
elif yolos_name == "yolos_s_200_pre":
_snake_case : List[str] = torch.tensor(
[[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] )
_snake_case : List[str] = torch.tensor(
[[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] )
elif yolos_name == "yolos_s_300_pre":
_snake_case : Dict = torch.tensor(
[[-36.22_20, -14.43_85, -23.54_57], [-35.69_70, -14.75_83, -21.39_35], [-31.59_39, -13.60_42, -16.80_49]] )
_snake_case : Union[str, Any] = torch.tensor(
[[0.76_14, 0.23_16, 0.47_28], [0.71_68, 0.44_95, 0.38_55], [0.49_96, 0.14_66, 0.99_96]] )
elif yolos_name == "yolos_s_dWr":
_snake_case : Tuple = torch.tensor(
[[-42.86_68, -24.10_49, -41.16_90], [-34.74_56, -14.12_74, -24.91_94], [-33.78_98, -12.19_46, -25.64_95]] )
_snake_case : Optional[Any] = torch.tensor(
[[0.55_87, 0.27_73, 0.06_05], [0.50_04, 0.30_14, 0.99_94], [0.49_99, 0.15_48, 0.99_94]] )
elif yolos_name == "yolos_base":
_snake_case : int = torch.tensor(
[[-40.60_64, -24.30_84, -32.64_47], [-55.19_90, -30.77_19, -35.58_77], [-51.43_11, -33.35_07, -35.64_62]] )
_snake_case : Optional[int] = torch.tensor(
[[0.55_55, 0.27_94, 0.06_55], [0.90_49, 0.26_64, 0.18_94], [0.91_83, 0.19_84, 0.16_35]] )
else:
raise ValueError(F"Unknown yolos_name: {yolos_name}" )
assert torch.allclose(logits[0, :3, :3] , snake_case__ , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , snake_case__ , atol=1e-4 )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(snake_case__ )
if push_to_hub:
_snake_case : Dict = {
"""yolos_ti""": """yolos-tiny""",
"""yolos_s_200_pre""": """yolos-small""",
"""yolos_s_300_pre""": """yolos-small-300""",
"""yolos_s_dWr""": """yolos-small-dwr""",
"""yolos_base""": """yolos-base""",
}
print("""Pushing to the hub...""" )
_snake_case : str = model_mapping[yolos_name]
image_processor.push_to_hub(snake_case__ , organization="""hustvl""" )
model.push_to_hub(snake_case__ , organization="""hustvl""" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
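# Example invocation of the conversion script above; the script file name and
# paths are placeholders:
#
#   python convert_yolos_to_pytorch.py \
#       --yolos_name yolos_s_200_pre \
#       --checkpoint_path /path/to/yolos_s_200_pre.pth \
#       --pytorch_dump_folder_path ./yolos-small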
| 64
| 0
|
'''simple docstring'''
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
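# A minimal sketch of the default collator imported above; it stacks
# per-example features into batched tensors and renames "label" to "labels":
features = [
    {"input_ids": [101, 2023, 102], "label": 0},
    {"input_ids": [101, 2009, 102], "label": 1},
]
batch = default_data_collator(features)  # {"input_ids": 2x3 tensor, "labels": tensor([0, 1])}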
| 184
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
_lowerCAmelCase = {
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
    model_type = '''instructblip_vision_model'''
def __init__( self ,__UpperCAmelCase=1408 ,__UpperCAmelCase=6144 ,__UpperCAmelCase=39 ,__UpperCAmelCase=16 ,__UpperCAmelCase=224 ,__UpperCAmelCase=14 ,__UpperCAmelCase="gelu" ,__UpperCAmelCase=1E-6 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=1E-10 ,__UpperCAmelCase=True ,**__UpperCAmelCase ,) -> List[Any]:
super().__init__(**__UpperCAmelCase )
lowerCAmelCase__ : Any = hidden_size
lowerCAmelCase__ : List[str] = intermediate_size
lowerCAmelCase__ : str = num_hidden_layers
lowerCAmelCase__ : Tuple = num_attention_heads
lowerCAmelCase__ : Union[str, Any] = patch_size
lowerCAmelCase__ : int = image_size
lowerCAmelCase__ : Tuple = initializer_range
lowerCAmelCase__ : Optional[int] = attention_dropout
lowerCAmelCase__ : List[str] = layer_norm_eps
lowerCAmelCase__ : List[Any] = hidden_act
lowerCAmelCase__ : int = qkv_bias
@classmethod
def UpperCAmelCase_ ( cls ,__UpperCAmelCase ,**__UpperCAmelCase ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = cls.get_config_dict(__UpperCAmelCase ,**__UpperCAmelCase )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("""model_type""" ) == "instructblip":
lowerCAmelCase__ : Optional[int] = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__UpperCAmelCase ,**__UpperCAmelCase )
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
    model_type = '''instructblip_qformer'''
def __init__( self ,__UpperCAmelCase=3_0522 ,__UpperCAmelCase=768 ,__UpperCAmelCase=12 ,__UpperCAmelCase=12 ,__UpperCAmelCase=3072 ,__UpperCAmelCase="gelu" ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=512 ,__UpperCAmelCase=0.0_2 ,__UpperCAmelCase=1E-12 ,__UpperCAmelCase=0 ,__UpperCAmelCase="absolute" ,__UpperCAmelCase=2 ,__UpperCAmelCase=1408 ,**__UpperCAmelCase ,) -> Tuple:
super().__init__(pad_token_id=__UpperCAmelCase ,**__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = vocab_size
lowerCAmelCase__ : Optional[Any] = hidden_size
lowerCAmelCase__ : Any = num_hidden_layers
lowerCAmelCase__ : int = num_attention_heads
lowerCAmelCase__ : Tuple = hidden_act
lowerCAmelCase__ : int = intermediate_size
lowerCAmelCase__ : Tuple = hidden_dropout_prob
lowerCAmelCase__ : Optional[Any] = attention_probs_dropout_prob
lowerCAmelCase__ : Optional[Any] = max_position_embeddings
lowerCAmelCase__ : Any = initializer_range
lowerCAmelCase__ : Tuple = layer_norm_eps
lowerCAmelCase__ : Dict = position_embedding_type
lowerCAmelCase__ : int = cross_attention_frequency
lowerCAmelCase__ : List[Any] = encoder_hidden_size
@classmethod
def UpperCAmelCase_ ( cls ,__UpperCAmelCase ,**__UpperCAmelCase ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ : Any = cls.get_config_dict(__UpperCAmelCase ,**__UpperCAmelCase )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("""model_type""" ) == "instructblip":
lowerCAmelCase__ : Tuple = config_dict["""qformer_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__UpperCAmelCase ,**__UpperCAmelCase )
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
    model_type = '''instructblip'''
    is_composition = True
def __init__( self ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=32 ,**__UpperCAmelCase ) -> Any:
super().__init__(**__UpperCAmelCase )
if vision_config is None:
lowerCAmelCase__ : Any = {}
logger.info("""vision_config is None. initializing the InstructBlipVisionConfig with default values.""" )
if qformer_config is None:
lowerCAmelCase__ : List[str] = {}
logger.info("""qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.""" )
if text_config is None:
lowerCAmelCase__ : List[Any] = {}
logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""" )
lowerCAmelCase__ : Any = InstructBlipVisionConfig(**__UpperCAmelCase )
lowerCAmelCase__ : Tuple = InstructBlipQFormerConfig(**__UpperCAmelCase )
lowerCAmelCase__ : Tuple = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
lowerCAmelCase__ : Any = CONFIG_MAPPING[text_model_type](**__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = self.text_config.tie_word_embeddings
lowerCAmelCase__ : Any = self.text_config.is_encoder_decoder
lowerCAmelCase__ : int = num_query_tokens
lowerCAmelCase__ : List[str] = self.vision_config.hidden_size
lowerCAmelCase__ : Optional[int] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
lowerCAmelCase__ : Optional[Any] = 1.0
lowerCAmelCase__ : Dict = 0.0_2
@classmethod
def UpperCAmelCase_ ( cls ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,**__UpperCAmelCase ,) -> Union[str, Any]:
return cls(
vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**__UpperCAmelCase ,)
def UpperCAmelCase_ ( self ) -> str:
lowerCAmelCase__ : List[Any] = copy.deepcopy(self.__dict__ )
lowerCAmelCase__ : str = self.vision_config.to_dict()
lowerCAmelCase__ : Union[str, Any] = self.qformer_config.to_dict()
lowerCAmelCase__ : Union[str, Any] = self.text_config.to_dict()
lowerCAmelCase__ : str = self.__class__.model_type
return output
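# A sketch of composing the three sub-configs above into the top-level
# config. The classmethod name follows the upstream transformers API
# (from_vision_qformer_text_configs); the de-obfuscated class names below
# are assumptions, since the names in this snippet are obfuscated:
from transformers import InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig, OPTConfig

config = InstructBlipConfig.from_vision_qformer_text_configs(
    InstructBlipVisionConfig(), InstructBlipQFormerConfig(), OPTConfig()
)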
| 184
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A : Optional[Any] = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[int] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[Any] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
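# A toy illustration (not the transformers implementation) of the lazy-import
# idea behind _LazyModule above, using module-level __getattr__ (PEP 562);
# the submodule names are taken from the import structure above:
import importlib

_LAZY = {"FNetModel": ".modeling_fnet", "FNetTokenizerFast": ".tokenization_fnet_fast"}

def __getattr__(name):
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")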
| 184
|
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
A : List[str] = logging.get_logger(__name__)
class _lowercase ( lowercase__):
"""simple docstring"""
def __init__( self : Optional[int] , *__lowerCamelCase : List[str] , **__lowerCamelCase : str ):
'''simple docstring'''
warnings.warn(
"The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use BeitImageProcessor instead." , __lowerCamelCase , )
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
| 184
| 1
|
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] ) -> Dict:
assert isinstance(__UpperCAmelCase , __UpperCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[Any] ) -> str:
SCREAMING_SNAKE_CASE_ = tmp_path / 'cache'
SCREAMING_SNAKE_CASE_ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ = SqlDatasetReader(
'dataset' , 'sqlite:///' + sqlite_path , cache_dir=__UpperCAmelCase , keep_in_memory=__UpperCAmelCase ).read()
_check_sql_dataset(__UpperCAmelCase , __UpperCAmelCase )
@require_sqlalchemy
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def UpperCAmelCase_ ( __UpperCAmelCase : int , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE_ = tmp_path / 'cache'
SCREAMING_SNAKE_CASE_ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
SCREAMING_SNAKE_CASE_ = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ = (
Features({feature: Value(__UpperCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , features=__UpperCAmelCase , cache_dir=__UpperCAmelCase ).read()
_check_sql_dataset(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase_ ( __UpperCAmelCase : List[str] ) -> Union[str, Any]:
    with contextlib.closing(sqlite3.connect(__UpperCAmelCase ) ) as con:
SCREAMING_SNAKE_CASE_ = con.cursor()
cur.execute('SELECT * FROM dataset' )
for row in cur:
yield row
@require_sqlalchemy
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = tmp_path / 'cache'
SCREAMING_SNAKE_CASE_ = os.path.join(__UpperCAmelCase , 'tmp.sql' )
SCREAMING_SNAKE_CASE_ = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , cache_dir=__UpperCAmelCase ).read()
SqlDatasetWriter(__UpperCAmelCase , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=1 ).write()
SCREAMING_SNAKE_CASE_ = iter_sql_file(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = iter_sql_file(__UpperCAmelCase )
for rowa, rowa in zip(__UpperCAmelCase , __UpperCAmelCase ):
assert rowa == rowa
@require_sqlalchemy
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[str] ) -> Dict:
SCREAMING_SNAKE_CASE_ = tmp_path / 'cache'
SCREAMING_SNAKE_CASE_ = os.path.join(__UpperCAmelCase , 'tmp.sql' )
SCREAMING_SNAKE_CASE_ = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , cache_dir=__UpperCAmelCase ).read()
SqlDatasetWriter(__UpperCAmelCase , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=2 ).write()
SCREAMING_SNAKE_CASE_ = iter_sql_file(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = iter_sql_file(__UpperCAmelCase )
for rowa, rowa in zip(__UpperCAmelCase , __UpperCAmelCase ):
assert rowa == rowa
@require_sqlalchemy
def UpperCAmelCase_ ( __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : str ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = tmp_path / 'cache'
SCREAMING_SNAKE_CASE_ = os.path.join(__UpperCAmelCase , 'tmp.sql' )
SCREAMING_SNAKE_CASE_ = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , cache_dir=__UpperCAmelCase ).read()
with pytest.raises(__UpperCAmelCase ):
SqlDatasetWriter(__UpperCAmelCase , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=0 ).write()
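# The reader/writer under test above back Dataset.from_sql / Dataset.to_sql
# in recent versions of datasets; a minimal round-trip sketch (the sqlite
# path is a placeholder):
from datasets import Dataset

ds = Dataset.from_dict({"col_1": ["a"], "col_2": [1], "col_3": [1.0]})
ds.to_sql("dataset", "sqlite:///example.db")
ds_roundtrip = Dataset.from_sql("dataset", "sqlite:///example.db")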
| 210
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Dict = logging.get_logger(__name__)
lowerCamelCase__ : Any = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "speech_to_text"
lowercase_ = ["past_key_values"]
lowercase_ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Union[str, Any] , _lowerCAmelCase : Optional[int]=10_000 , _lowerCAmelCase : str=12 , _lowerCAmelCase : Tuple=2_048 , _lowerCAmelCase : str=4 , _lowerCAmelCase : Tuple=6 , _lowerCAmelCase : Optional[int]=2_048 , _lowerCAmelCase : Dict=4 , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : int=0.0 , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Any="relu" , _lowerCAmelCase : Any=256 , _lowerCAmelCase : List[str]=0.1 , _lowerCAmelCase : Optional[Any]=0.0 , _lowerCAmelCase : Optional[int]=0.0 , _lowerCAmelCase : Tuple=0.02 , _lowerCAmelCase : List[str]=2 , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : List[str]=1 , _lowerCAmelCase : Dict=0 , _lowerCAmelCase : Tuple=2 , _lowerCAmelCase : Optional[int]=6_000 , _lowerCAmelCase : Tuple=1_024 , _lowerCAmelCase : Any=2 , _lowerCAmelCase : str=(5, 5) , _lowerCAmelCase : Optional[int]=1_024 , _lowerCAmelCase : List[Any]=80 , _lowerCAmelCase : List[Any]=1 , **_lowerCAmelCase : List[Any] , ):
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = d_model
SCREAMING_SNAKE_CASE_ = encoder_ffn_dim
SCREAMING_SNAKE_CASE_ = encoder_layers
SCREAMING_SNAKE_CASE_ = encoder_attention_heads
SCREAMING_SNAKE_CASE_ = decoder_ffn_dim
SCREAMING_SNAKE_CASE_ = decoder_layers
SCREAMING_SNAKE_CASE_ = decoder_attention_heads
SCREAMING_SNAKE_CASE_ = dropout
SCREAMING_SNAKE_CASE_ = attention_dropout
SCREAMING_SNAKE_CASE_ = activation_dropout
SCREAMING_SNAKE_CASE_ = activation_function
SCREAMING_SNAKE_CASE_ = init_std
SCREAMING_SNAKE_CASE_ = encoder_layerdrop
SCREAMING_SNAKE_CASE_ = decoder_layerdrop
SCREAMING_SNAKE_CASE_ = use_cache
SCREAMING_SNAKE_CASE_ = encoder_layers
SCREAMING_SNAKE_CASE_ = scale_embedding # scale factor will be sqrt(d_model) if True
SCREAMING_SNAKE_CASE_ = max_source_positions
SCREAMING_SNAKE_CASE_ = max_target_positions
SCREAMING_SNAKE_CASE_ = num_conv_layers
SCREAMING_SNAKE_CASE_ = list(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = conv_channels
SCREAMING_SNAKE_CASE_ = input_feat_per_channel
SCREAMING_SNAKE_CASE_ = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
F"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, "
F"`config.num_conv_layers = {self.num_conv_layers}`." )
super().__init__(
pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , is_encoder_decoder=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , **_lowerCAmelCase , )
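# The constructor above enforces len(conv_kernel_sizes) == num_conv_layers;
# a quick sketch, assuming the upstream class name Speech2TextConfig (the
# name in this snippet is obfuscated):
from transformers import Speech2TextConfig

config = Speech2TextConfig(num_conv_layers=2, conv_kernel_sizes=(5, 5))   # ok
# Speech2TextConfig(num_conv_layers=3, conv_kernel_sizes=(5, 5))          # raises ValueError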
| 210
| 1
|
'''simple docstring'''
from __future__ import annotations
from random import random
class UpperCAmelCase :
    def __init__( self : Any , value : int | None = None ) -> None:
_lowerCAmelCase = value
_lowerCAmelCase = random()
_lowerCAmelCase = None
_lowerCAmelCase = None
def __repr__( self : Dict ) -> str:
from pprint import pformat
if self.left is None and self.right is None:
return f"'{self.value}: {self.prior:.5}'"
else:
return pformat(
{f"{self.value}: {self.prior:.5}": (self.left, self.right)} , indent=1 )
def __str__( self : Dict ) -> str:
_lowerCAmelCase = str(self.value ) + """ """
_lowerCAmelCase = str(self.left or """""" )
_lowerCAmelCase = str(self.right or """""" )
return value + left + right
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
_lowerCAmelCase , _lowerCAmelCase = split(root.left , lowerCAmelCase )
return left, root
else:
_lowerCAmelCase , _lowerCAmelCase = split(root.right , lowerCAmelCase )
return root, right
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
_lowerCAmelCase = merge(left.right , lowerCAmelCase )
return left
else:
_lowerCAmelCase = merge(lowerCAmelCase , right.left )
return right
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase = Node(lowerCAmelCase )
_lowerCAmelCase , _lowerCAmelCase = split(lowerCAmelCase , lowerCAmelCase )
return merge(merge(lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase )
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = split(lowerCAmelCase , value - 1 )
_lowerCAmelCase , _lowerCAmelCase = split(lowerCAmelCase , lowerCAmelCase )
return merge(lowerCAmelCase , lowerCAmelCase )
def UpperCamelCase__ ( lowerCAmelCase ):
"""simple docstring"""
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=""",""" )
inorder(root.right )
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
for arg in args.split():
if arg[0] == "+":
_lowerCAmelCase = insert(lowerCAmelCase , int(arg[1:] ) )
elif arg[0] == "-":
_lowerCAmelCase = erase(lowerCAmelCase , int(arg[1:] ) )
else:
print("""Unknown command""" )
return root
def UpperCamelCase__ ( ):
"""simple docstring"""
_lowerCAmelCase = None
print(
"""enter numbers to create a tree, + value to add value into treap, """
"""- value to erase all nodes with value. 'q' to quit. """ )
_lowerCAmelCase = input()
while args != "q":
_lowerCAmelCase = interact_treap(lowerCAmelCase , lowerCAmelCase )
print(lowerCAmelCase )
_lowerCAmelCase = input()
print("""good by!""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
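# A small sketch exercising the treap above, assuming the upstream names
# insert / erase / inorder (the definitions above are obfuscated and shadow
# one another):
root = None
for v in (5, 3, 8):
    root = insert(root, v)
inorder(root)          # expected output: 3,5,8,
root = erase(root, 5)
inorder(root)          # expected output: 3,8,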
| 70
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def _A ( SCREAMING_SNAKE_CASE : Accelerator , SCREAMING_SNAKE_CASE : int = 16 ):
"""simple docstring"""
a__ : int =AutoTokenizer.from_pretrained("bert-base-cased" )
a__ : List[str] =load_dataset("glue" , "mrpc" )
def tokenize_function(SCREAMING_SNAKE_CASE : List[Any] ):
# max_length=None => use the model max length (it's actually the default)
a__ : int =tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
a__ : Dict =datasets.map(
SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
a__ : Dict =tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(SCREAMING_SNAKE_CASE : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
a__ : Optional[Any] =128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
a__ : str =16
elif accelerator.mixed_precision != "no":
a__ : Union[str, Any] =8
else:
a__ : List[str] =None
return tokenizer.pad(
SCREAMING_SNAKE_CASE , padding="longest" , max_length=SCREAMING_SNAKE_CASE , pad_to_multiple_of=SCREAMING_SNAKE_CASE , return_tensors="pt" , )
# Instantiate dataloaders.
a__ : Any =DataLoader(
tokenized_datasets["train"] , shuffle=SCREAMING_SNAKE_CASE , collate_fn=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE )
a__ : int =DataLoader(
tokenized_datasets["validation"] , shuffle=SCREAMING_SNAKE_CASE , collate_fn=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
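
# Editor's note: a minimal sketch, not part of the original script, showing the
# `gather_for_metrics` shortcut mentioned in the evaluation loop above. It assumes the
# same `accelerator`, `model`, `eval_dataloader`, and `metric` objects; the call
# truncates the duplicated samples of the last distributed batch automatically, which
# replaces the manual `samples_seen` bookkeeping in `training_function`.
def evaluate_with_gather_for_metrics(accelerator, model, eval_dataloader, metric):
    model.eval()
    for batch in eval_dataloader:
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
        metric.add_batch(predictions=predictions, references=references)
    return metric.compute()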
| 95
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
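
# Editor's note: a hypothetical usage sketch, not part of the original module. The ONNX
# config exposes dynamic axes for export; the constructor signature is assumed from
# `transformers.onnx.OnnxConfig`:
#
#     config = Data2VecTextConfig()
#     onnx_config = Data2VecTextOnnxConfig(config)
#     print(onnx_config.inputs)
#     # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#     #              ('attention_mask', {0: 'batch', 1: 'sequence'})])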
| 350
|
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2", "stage3"], out_indices=[1, 2, 3]):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, patch_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices)
    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_save_load_fast_init_to_base(self):
        pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_to_base(self):
        pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif isinstance(tuple_object, Dict):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values(), dict_object.values()
                    ):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                        ),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                            f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                            f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
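
# Editor's note: a minimal standalone sketch, not part of the original test file,
# verifying the Swin shape arithmetic used in `create_and_check_model` above with the
# tester's defaults: each of the `len(depths) - 1` stages merges 2x2 patches (4x fewer
# tokens) and doubles the embedding width.
def _expected_swin_output_shape(image_size=32, patch_size=2, embed_dim=16, num_stages=3):
    num_patches = (image_size // patch_size) ** 2  # 16 * 16 = 256 initial tokens
    seq_len = num_patches // (4 ** (num_stages - 1))  # 256 / 16 = 16 final tokens
    dim = int(embed_dim * 2 ** (num_stages - 1))  # 16 * 4 = 64 final channels
    return seq_len, dim


assert _expected_swin_output_shape() == (16, 64)  # matches backbone channels [16, 32, 64]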
| 220
| 0
|
'''simple docstring'''
def binary_and(a: int, b: int) -> str:
    """
    Take in two integers, return a binary string of their bitwise AND.

    >>> binary_and(25, 32)
    '0b000000'
    >>> binary_and(3, 5)
    '0b001'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
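
    # Editor's note: a quick hypothetical sanity check, not part of the original module:
    # the string-based result must agree with Python's built-in bitwise `&` operator.
    a, b = 25, 32
    width = max(a.bit_length(), b.bit_length())
    assert binary_and(a, b) == "0b" + format(a & b, f"0{width}b")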
| 200
|
'''simple docstring'''
import argparse
import struct
import unittest
class SHA256:
    """
    Class to contain the entire pipeline for the SHA-256 hash.
    """

    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initialize hash values
        self.hashes = [
0X6A09_E667,
0XBB67_AE85,
0X3C6E_F372,
0XA54F_F53A,
0X510E_527F,
0X9B05_688C,
0X1F83_D9AB,
0X5BE0_CD19,
]
        # Initialize round constants
        self.round_constants = [
0X428A_2F98,
0X7137_4491,
0XB5C0_FBCF,
0XE9B5_DBA5,
0X3956_C25B,
0X59F1_11F1,
0X923F_82A4,
0XAB1C_5ED5,
0XD807_AA98,
0X1283_5B01,
0X2431_85BE,
0X550C_7DC3,
0X72BE_5D74,
0X80DE_B1FE,
0X9BDC_06A7,
0XC19B_F174,
0XE49B_69C1,
0XEFBE_4786,
0X0FC1_9DC6,
0X240C_A1CC,
0X2DE9_2C6F,
0X4A74_84AA,
0X5CB0_A9DC,
0X76F9_88DA,
0X983E_5152,
0XA831_C66D,
0XB003_27C8,
0XBF59_7FC7,
0XC6E0_0BF3,
0XD5A7_9147,
0X06CA_6351,
0X1429_2967,
0X27B7_0A85,
0X2E1B_2138,
0X4D2C_6DFC,
0X5338_0D13,
0X650A_7354,
0X766A_0ABB,
0X81C2_C92E,
0X9272_2C85,
0XA2BF_E8A1,
0XA81A_664B,
0XC24B_8B70,
0XC76C_51A3,
0XD192_E819,
0XD699_0624,
0XF40E_3585,
0X106A_A070,
0X19A4_C116,
0X1E37_6C08,
0X2748_774C,
0X34B0_BCB5,
0X391C_0CB3,
0X4ED8_AA4A,
0X5B9C_CA4F,
0X682E_6FF3,
0X748F_82EE,
0X78A5_636F,
0X84C8_7814,
0X8CC7_0208,
0X90BE_FFFA,
0XA450_6CEB,
0XBEF9_A3F7,
0XC671_78F2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_0000_0000
                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFF_FFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_0000_0000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_0000_0000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_0000_0000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_0000_0000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_0000_0000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        return 0xFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    """
    Test class for the SHA256 class, comparing it against hashlib.
    """

    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main() -> None:
    """
    Provides an option to hash either a string or the contents of a file,
    and prints the calculated SHA-256 hash.
    """
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s", "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string", )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    hash_input = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input, "utf-8")
    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
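
# Editor's note: a small hypothetical demo, not part of the original module, checking
# the class above against the well-known NIST test vector for the message "abc".
def demo_known_vector() -> None:
    digest = SHA256(b"abc").hash
    assert digest == "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"
    print("SHA-256(b'abc') =", digest)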
| 200
| 1
|
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
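
# Editor's note: a hypothetical illustration, not part of the original test, of what the
# generated dummies buy you. With the real backend missing, importing the dummy class
# works, but *using* it raises a clear error instead of an obscure ImportError:
#
#     from transformers.utils.dummy_pt_objects import BertModel
#     model = BertModel.from_pretrained("bert-base-cased")
#     # -> ImportError: BertModel requires the PyTorch library but it was not found ...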
| 52
|
'''simple docstring'''
def stooge_sort(arr: list) -> list:
    """
    Sorts a list in place with stooge sort and returns it.

    >>> stooge_sort([18, 0, -7, -1, 2, 2])
    [-7, -1, 0, 2, 2, 18]
    """
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)
        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)
        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, h - t)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
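
# Editor's note: a quick hypothetical usage note, not part of the original module.
# Stooge sort recurses three times on 2/3 of the range, giving a running time of
# O(n^(log 3 / log 1.5)), roughly O(n^2.71) -- worse than bubble sort; it is a teaching
# curiosity, not a practical sort.
#
#     >>> stooge_sort([5, 4, 3, 2, 1])
#     [1, 2, 3, 4, 5]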
| 52
| 1
|
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
    def test_is_control(self):
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence, return_attention_mask=False, return_token_type_ids=False, return_offsets_mapping=True, add_special_tokens=True, )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"]))
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_a = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_a + [2]
    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True)

                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)

                self.assertEqual(input_dict, prepared_input_dict)
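
# Editor's note: a hypothetical usage sketch, not part of the original test file. RoCBert
# feeds three parallel id sequences per token -- vocabulary id, glyph-shape id, and
# pronunciation id -- which is what the `convert_tokens_to_*_ids` trio above exercises:
#
#     tokenizer = RoCBertTokenizer(vocab_file, word_shape_file, word_pronunciation_file)
#     tokens = tokenizer.tokenize("你好")
#     ids = tokenizer.convert_tokens_to_ids(tokens)
#     shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
#     pron_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
#     # the three lists are aligned one-to-one with `tokens`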
| 184
|
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """
    Implementation of the simulated annealing search algorithm.
    """
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
if __name__ == "__main__":

    def test_fa(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_fa2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
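
# Editor's note: a small hypothetical demo, not part of the original module, of the
# acceptance rule used above: a worsening move (change < 0) is accepted with
# probability e**(change / T), so high temperatures accept almost anything and the
# search turns greedy as T cools.
if __name__ == "__main__":
    for temp in (100.0, 10.0, 1.0):
        print(f"T={temp:>6}: P(accept change=-5) = {math.e ** (-5 / temp):.4f}")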
| 184
| 1
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = """ \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        code = black.format_str(code, mode=black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119))
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput", "DDPMSchedulerOutput", REFERENCE_CODE + "\n")
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput", "DDPMSchedulerOutput", REFERENCE_CODE)
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test", "TestSchedulerOutput", re.sub("DDPM", "Test", REFERENCE_CODE))
        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}", f"{long_class_name}SchedulerOutput", re.sub("DDPM", long_class_name, REFERENCE_CODE))
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test", "TestSchedulerOutput", REFERENCE_CODE, overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE))
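
# Editor's note: a hypothetical illustration, not part of the original test, of the
# "Copied from" convention these tests enforce. A derived class is marked like this:
#
#     # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test
#     class TestSchedulerOutput(BaseOutput):
#         ...
#
# `make fix-copies` then re-syncs (or, applying the marker's rename rules, rewrites) the
# body from the source class whenever the two drift apart.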
| 365
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Pipeline
    params = [
        "image_embeds",
        "negative_image_embeds",
    ]
    batch_params = ["image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046])

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
def __snake_case ( self : int ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_text2img( self ):
        '''simple docstring'''
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy''' )
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyVaaPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        prompt = '''red cat, 4k photo'''
        generator = torch.Generator(device='''cuda''' ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
        generator = torch.Generator(device='''cuda''' ).manual_seed(0 )
        output = pipeline(
            image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=1_0_0 , output_type='''np''' , )
        image = output.images[0]
        assert image.shape == (5_1_2, 5_1_2, 3)
        assert_mean_pixel_difference(image , expected_image )
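For orientation, here is a minimal sketch of the same two-stage text-to-image flow through the public diffusers API. The pipeline class names below are the upstream equivalents of the aliases used in this test; treat the sketch as illustrative, not as part of the test suite.

import torch
from diffusers import KandinskyV22PriorPipeline, KandinskyV22Pipeline

# stage 1: text prompt -> image embeddings
prior = KandinskyV22PriorPipeline.from_pretrained(
    'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.float16 ).to('cuda' )
image_emb , negative_emb = prior('red cat, 4k photo' , negative_prompt='' ).to_tuple()

# stage 2: image embeddings -> pixels
decoder = KandinskyV22Pipeline.from_pretrained(
    'kandinsky-community/kandinsky-2-2-decoder' , torch_dtype=torch.float16 ).to('cuda' )
image = decoder(
    image_embeds=image_emb , negative_image_embeds=negative_emb , height=512 , width=512 ).images[0]
image.save('red_cat.png' )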
| 172
| 0
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        image_processor_map = {
            '''do_resize''': True,
            '''size''': {'''height''': 18, '''width''': 18},
            '''do_normalize''': True,
            '''image_mean''': [0.5, 0.5, 0.5],
            '''image_std''': [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_image_processor( self , **kwargs ):
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )

    def tearDown( self ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )

    def prepare_image_inputs( self ):
        '''simple docstring'''
        image_inputs = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default( self ):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        processor.save_pretrained(self.tmpdirname )
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor.image_processor , ViTImageProcessor )

    def test_save_load_pretrained_additional_features( self ):
        '''simple docstring'''
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , ViTImageProcessor )
    def test_image_processor( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors='''np''' )
        input_processor = processor(images=image_input , return_tensors='''np''' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def test_tokenizer( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
        # test if it raises when no input is passed
        with self.assertRaises(ValueError ):
            processor()

    def test_tokenizer_decode( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )

    def test_model_input_names( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
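For readers unfamiliar with this processor, a hedged usage sketch outside the temp-dir fixtures; the two checkpoint names are illustrative assumptions, not taken from the test:

import numpy as np
from PIL import Image
from transformers import BertTokenizer, ViTImageProcessor, VisionTextDualEncoderProcessor

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased' )  # assumed checkpoint
image_processor = ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' )  # assumed checkpoint
processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
# one call tokenizes the text and preprocesses the image together
image = Image.fromarray(np.zeros((30, 400, 3) , dtype=np.uint8 ) )
batch = processor(text='lower newer' , images=image , return_tensors='pt' )
print(sorted(batch.keys() ) )  # attention_mask, input_ids, pixel_values, token_type_ids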
| 210
|
from ..utils import DummyObject, requires_backends
class _UpperCamelCase ( metaclass=DummyObject ):
    """simple docstring"""

    _backends = ['''keras_nlp''']

    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['''keras_nlp'''] )
| 210
| 1
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
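The try/except guard above swaps in dummy objects when torch or transformers>=4.25.0 is unavailable, so the import of UnCLIPPipeline always succeeds and only errors clearly at use time. A hedged usage sketch, with the publicly documented checkpoint ID recalled from memory rather than taken from this file:

# from diffusers import UnCLIPPipeline
# pipe = UnCLIPPipeline.from_pretrained('kakaobrain/karlo-v1-alpha')  # assumed checkpoint
# image = pipe('a red cat').images[0]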
| 355
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ):
a__ : Tuple = RoCBertTokenizer
a__ : List[Any] = None
a__ : List[Any] = False
a__ : Dict = True
a__ : int = filter_non_english
def a ( self : Optional[int] ):
super().setUp()
__UpperCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d''']
__UpperCAmelCase = {}
__UpperCAmelCase = {}
for i, value in enumerate(_lowercase ):
__UpperCAmelCase = i
__UpperCAmelCase = i
__UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
__UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
json.dump(_lowercase , _lowercase , ensure_ascii=_lowercase )
with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
json.dump(_lowercase , _lowercase , ensure_ascii=_lowercase )
def a ( self : Optional[Any] ):
__UpperCAmelCase = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__UpperCAmelCase = tokenizer.tokenize('''你好[SEP]你是谁''' )
self.assertListEqual(_lowercase , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(_lowercase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(_lowercase ) , [5, 6, 2, 5, 7, 8] )
def a ( self : List[Any] ):
__UpperCAmelCase = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def a ( self : Union[str, Any] ):
__UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=_lowercase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def a ( self : Optional[Any] ):
__UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=_lowercase , strip_accents=_lowercase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def a ( self : List[Any] ):
__UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=_lowercase , strip_accents=_lowercase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def a ( self : Optional[Any] ):
__UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=_lowercase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def a ( self : Optional[int] ):
__UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=_lowercase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def a ( self : Union[str, Any] ):
__UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=_lowercase , strip_accents=_lowercase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def a ( self : Any ):
__UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=_lowercase , strip_accents=_lowercase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def a ( self : int ):
__UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=_lowercase , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def a ( self : Optional[Any] ):
__UpperCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
__UpperCAmelCase = {}
for i, token in enumerate(_lowercase ):
__UpperCAmelCase = i
__UpperCAmelCase = RoCBertWordpieceTokenizer(vocab=_lowercase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def a ( self : Union[str, Any] ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def a ( self : Dict ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def a ( self : Optional[int] ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def a ( self : Tuple ):
__UpperCAmelCase = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_lowercase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
if self.test_rust_tokenizer:
__UpperCAmelCase = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(_lowercase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
def a ( self : Optional[int] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
__UpperCAmelCase = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
__UpperCAmelCase = tokenizer_r.encode_plus(
_lowercase , return_attention_mask=_lowercase , return_token_type_ids=_lowercase , return_offsets_mapping=_lowercase , add_special_tokens=_lowercase , )
__UpperCAmelCase = tokenizer_r.do_lower_case if hasattr(_lowercase , '''do_lower_case''' ) else False
__UpperCAmelCase = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def a ( self : Dict ):
__UpperCAmelCase = ['''的''', '''人''', '''有''']
__UpperCAmelCase = ''''''.join(_lowercase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase = True
__UpperCAmelCase = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
__UpperCAmelCase = tokenizer_p.encode(_lowercase , add_special_tokens=_lowercase )
__UpperCAmelCase = tokenizer_r.encode(_lowercase , add_special_tokens=_lowercase )
__UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(_lowercase )
__UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(_lowercase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_lowercase , _lowercase )
self.assertListEqual(_lowercase , _lowercase )
__UpperCAmelCase = False
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
__UpperCAmelCase = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase )
__UpperCAmelCase = tokenizer_r.encode(_lowercase , add_special_tokens=_lowercase )
__UpperCAmelCase = tokenizer_p.encode(_lowercase , add_special_tokens=_lowercase )
__UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(_lowercase )
__UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(_lowercase )
# it is expected that only the first Chinese character is not preceded by "##".
__UpperCAmelCase = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(_lowercase )
]
self.assertListEqual(_lowercase , _lowercase )
self.assertListEqual(_lowercase , _lowercase )
@slow
def a ( self : List[Any] ):
__UpperCAmelCase = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__UpperCAmelCase = tokenizer.encode('''你好''' , add_special_tokens=_lowercase )
__UpperCAmelCase = tokenizer.encode('''你是谁''' , add_special_tokens=_lowercase )
__UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(_lowercase )
__UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(_lowercase , _lowercase )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def a ( self : List[str] ):
__UpperCAmelCase = self.get_tokenizers(do_lower_case=_lowercase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase = '''你好,你是谁'''
__UpperCAmelCase = tokenizer.tokenize(_lowercase )
__UpperCAmelCase = tokenizer.convert_tokens_to_ids(_lowercase )
__UpperCAmelCase = tokenizer.convert_tokens_to_shape_ids(_lowercase )
__UpperCAmelCase = tokenizer.convert_tokens_to_pronunciation_ids(_lowercase )
__UpperCAmelCase = tokenizer.prepare_for_model(
_lowercase , _lowercase , _lowercase , add_special_tokens=_lowercase )
__UpperCAmelCase = tokenizer.encode_plus(_lowercase , add_special_tokens=_lowercase )
self.assertEqual(_lowercase , _lowercase )
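For reference, a hedged sketch of loading the pretrained tokenizer these fixtures mimic; the checkpoint name is an assumption based on the public hub, not part of this file:

from transformers import RoCBertTokenizer

tok = RoCBertTokenizer.from_pretrained('weiweishi/roc-bert-base-zh' )  # assumed checkpoint
enc = tok('你好,你是谁' , return_tensors='pt' )
print(enc['input_ids'].shape )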
| 86
| 0
|
"""simple docstring"""
from itertools import product
def total_frequency_distribution(sides_number , dice_number ):
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    face_numbers = range(min_face_number , max_face_number + 1 )
    for dice_numbers in product(face_numbers , repeat=dice_number ):
        total = sum(dice_numbers )
        totals_frequencies[total] += 1
    return totals_frequencies


def solution():
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4 , dice_number=9 )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6 , dice_number=6 )
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total , max_peter_total + 1 ):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total] )
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability , ndigits=7 )
    return rounded_peter_win_probability
if __name__ == "__main__":
print(f"""{solution() = }""")
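A quick sanity check of total_frequency_distribution on a trivially verifiable configuration: two 2-sided dice produce totals 2, 3, 4 with frequencies 1, 2, 1.

assert total_frequency_distribution(sides_number=2 , dice_number=2 ) == [0, 0, 1, 2, 1]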
| 61
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar('T')


class Node(Generic[T] ):
    def __init__( self , data : T ):
        self.data = data
        self.next : Node[T] | None = None

    def __str__( self ):
        return F'{self.data}'


class Stack(Generic[T] ):
    def __init__( self ):
        self.top : Node[T] | None = None

    def __iter__( self ) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__( self ):
        return "->".join([str(item ) for item in self] )

    def __len__( self ):
        return len(tuple(iter(self ) ) )

    def is_empty( self ) -> bool:
        return self.top is None

    def push( self , item : T ) -> None:
        node = Node(item )
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop( self ) -> T:
        if self.is_empty():
            raise IndexError('pop from empty stack' )
        assert isinstance(self.top , Node )
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek( self ) -> T:
        if self.is_empty():
            raise IndexError('peek from empty stack' )
        assert self.top is not None
        return self.top.data

    def clear( self ) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
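A short usage demo of the stack above:

stack = Stack[int]()
stack.push(3 )
stack.push(5 )
assert str(stack ) == '5->3'
assert stack.peek() == 5
assert stack.pop() == 5
assert len(stack ) == 1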
| 220
| 0
|
'''simple docstring'''
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features ):
    batch_size = np.inf

    def set_batch_size(feature ) -> None:
        nonlocal batch_size
        if isinstance(feature , Image ):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
        elif isinstance(feature , Audio ):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
        elif isinstance(feature , Value ) and feature.dtype == "binary":
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )

    _visit(features , set_batch_size )
    return None if batch_size is np.inf else batch_size
class ParquetDatasetReader(AbstractDatasetReader ):
    '''simple docstring'''

    def __init__( self , path_or_paths : NestedDataStructureLike[PathLike] , split : Optional[NamedSplit] = None , features : Optional[Features] = None , cache_dir : str = None , keep_in_memory : bool = False , streaming : bool = False , num_proc : Optional[int] = None , **kwargs , ):
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES['parquet'][1]
        self.builder = Parquet(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , hash=hash , **kwargs , )

    def read( self ):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
class ParquetDatasetWriter:
    '''simple docstring'''

    def __init__( self , dataset : Dataset , path_or_buf : Union[PathLike, BinaryIO] , batch_size : Optional[int] = None , **parquet_writer_kwargs , ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features )
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write( self ) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with open(self.path_or_buf , 'wb+' ) as buffer:
                written = self._write(file_obj=buffer , batch_size=batch_size , **self.parquet_writer_kwargs )
        else:
            written = self._write(file_obj=self.path_or_buf , batch_size=batch_size , **self.parquet_writer_kwargs )
        return written

    def _write( self , file_obj : BinaryIO , batch_size : int , **parquet_writer_kwargs ) -> int:
        written = 0
        parquet_writer_kwargs.pop('path_or_buf' , None )
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj , schema=schema , **parquet_writer_kwargs )
        for offset in logging.tqdm(
            range(0 , len(self.dataset ) , batch_size ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating parquet from Arrow format' , ):
            batch = query_table(
                table=self.dataset._data , key=slice(offset , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
            writer.write_table(batch )
            written += batch.nbytes
        writer.close()
        return written
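A minimal usage sketch: the public Dataset.to_parquet entry point routes through this writer class; the toy dataset below is illustrative.

from datasets import Dataset

ds = Dataset.from_dict({'text': ['a', 'b'], 'label': [0, 1]} )
written = ds.to_parquet('out.parquet' )  # returns the number of bytes written
print(written )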
| 280
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig ):
    '''simple docstring'''

    model_type = """perceiver"""

    def __init__( self , num_latents=256 , d_latents=1_280 , d_model=768 , num_blocks=1 , num_self_attends_per_block=26 , num_self_attention_heads=8 , num_cross_attention_heads=8 , qk_channels=None , v_channels=None , cross_attention_shape_for_attention="kv" , self_attention_widening_factor=1 , cross_attention_widening_factor=1 , hidden_act="gelu" , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , layer_norm_eps=1E-12 , use_query_residual=True , vocab_size=262 , max_position_embeddings=2_048 , image_size=56 , train_size=[368, 496] , num_frames=16 , audio_samples_per_frame=1_920 , samples_per_patch=16 , output_shape=[1, 16, 224, 224] , **kwargs , ):
        super().__init__(**kwargs )
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig ):
    '''simple docstring'''

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('inputs', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )

    @property
    def atol_for_validation( self ) -> float:
        return 1E-4

    def generate_dummy_inputs( self , preprocessor : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , batch_size : int = -1 , seq_length : int = -1 , num_choices : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , num_channels : int = 3 , image_width : int = 40 , image_height : int = 40 , ) -> Mapping[str, Any]:
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor , PreTrainedTokenizerBase ):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair )
            seq_length = compute_effective_axis_dimension(
                seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [' '.join(['a'] ) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input , return_tensors=framework ) )
            inputs['inputs'] = inputs.pop('input_ids' )
            return inputs
        elif isinstance(preprocessor , FeatureExtractionMixin ) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size , fixed_dimension=OnnxConfig.default_fixed_batch )
            dummy_input = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
            inputs = dict(preprocessor(images=dummy_input , return_tensors=framework ) )
            inputs['inputs'] = inputs.pop('pixel_values' )
            return inputs
        else:
            raise ValueError(
                'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.' )
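A hedged sketch of building a small config and a model from it; PerceiverModel is the upstream model class and the parameter values are illustrative only:

from transformers import PerceiverConfig, PerceiverModel

config = PerceiverConfig(num_latents=64 , d_latents=256 , num_self_attends_per_block=4 )
model = PerceiverModel(config )
print(model.config.d_latents )  # 256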
| 280
| 1
|
def solution(n : int = 1000 ) -> int:
    # return the index of the first Fibonacci term with `n` digits
    fib_a , fib_b = 1, 1
    index = 2
    while True:
        i = 0
        fib = fib_a + fib_b
        fib_a , fib_b = fib_b, fib
        index += 1
        for _ in str(fib ):
            i += 1
        if i == n:
            break
    return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
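Sanity check: the first Fibonacci term with three digits is F(12) = 144, so the function should return 12.

assert solution(3 ) == 12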
| 52
|
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout() -> None:
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
            requests.request("GET" , "https://huggingface.co" )
        with pytest.raises(requests.exceptions.ConnectTimeout ):
            requests.request("GET" , "https://huggingface.co" , timeout=1.0 )


@pytest.mark.integration
def test_offline_with_connection_error() -> None:
    with offline(OfflineSimulationMode.CONNECTION_FAILS ):
        with pytest.raises(requests.exceptions.ConnectionError ):
            requests.request("GET" , "https://huggingface.co" )


def test_offline_with_datasets_offline_mode_enabled() -> None:
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
            http_head("https://huggingface.co" )
| 52
| 1
|
def _modexpt(base : int , exponent : int , modulo_value : int ) -> int:
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base , exponent // 2 , modulo_value ) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base , exponent - 1 , modulo_value )) % modulo_value


def solution(base : int = 1777 , height : int = 1855 , digits : int = 8 ) -> int:
    result = base
    for _ in range(1 , height ):
        result = _modexpt(base , result , 10**digits )
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
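Two quick checks that follow directly from the definitions above: the helper computes modular powers, and the solution is the last 8 digits of the hyperexponentiation 1777↑↑1855.

assert _modexpt(3 , 4 , 5 ) == 81 % 5  # 3**4 = 81
assert 0 <= solution() < 10**8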
| 364
|
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'''kakaobrain/align-base''': '''https://huggingface.co/kakaobrain/align-base/resolve/main/config.json''',
}
class lowercase_ ( __SCREAMING_SNAKE_CASE ):
A__ : List[str] = """align_text_model"""
def __init__( self , __UpperCamelCase=3_0_5_2_2 , __UpperCamelCase=7_6_8 , __UpperCamelCase=1_2 , __UpperCamelCase=1_2 , __UpperCamelCase=3_0_7_2 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=5_1_2 , __UpperCamelCase=2 , __UpperCamelCase=0.02 , __UpperCamelCase=1e-12 , __UpperCamelCase=0 , __UpperCamelCase="absolute" , __UpperCamelCase=True , **__UpperCamelCase , ):
"""simple docstring"""
super().__init__(**__UpperCamelCase )
UpperCamelCase_ = vocab_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = hidden_act
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = type_vocab_size
UpperCamelCase_ = initializer_range
UpperCamelCase_ = layer_norm_eps
UpperCamelCase_ = position_embedding_type
UpperCamelCase_ = use_cache
UpperCamelCase_ = pad_token_id
@classmethod
def lowerCamelCase_ ( cls , __UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
cls._set_token_in_kwargs(__UpperCamelCase )
UpperCamelCase_ , UpperCamelCase_ = cls.get_config_dict(__UpperCamelCase , **__UpperCamelCase )
# get the text config dict if we are loading from AlignConfig
if config_dict.get("""model_type""" ) == "align":
UpperCamelCase_ = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__UpperCamelCase , **__UpperCamelCase )
class lowercase_ ( __SCREAMING_SNAKE_CASE ):
A__ : Optional[int] = """align_vision_model"""
def __init__( self , __UpperCamelCase = 3 , __UpperCamelCase = 6_0_0 , __UpperCamelCase = 2.0 , __UpperCamelCase = 3.1 , __UpperCamelCase = 8 , __UpperCamelCase = [3, 3, 5, 3, 5, 5, 3] , __UpperCamelCase = [3_2, 1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2] , __UpperCamelCase = [1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2, 3_2_0] , __UpperCamelCase = [] , __UpperCamelCase = [1, 2, 2, 2, 1, 2, 1] , __UpperCamelCase = [1, 2, 2, 3, 3, 4, 1] , __UpperCamelCase = [1, 6, 6, 6, 6, 6, 6] , __UpperCamelCase = 0.25 , __UpperCamelCase = "swish" , __UpperCamelCase = 2_5_6_0 , __UpperCamelCase = "mean" , __UpperCamelCase = 0.02 , __UpperCamelCase = 0.001 , __UpperCamelCase = 0.99 , __UpperCamelCase = 0.2 , **__UpperCamelCase , ):
"""simple docstring"""
super().__init__(**__UpperCamelCase )
UpperCamelCase_ = num_channels
UpperCamelCase_ = image_size
UpperCamelCase_ = width_coefficient
UpperCamelCase_ = depth_coefficient
UpperCamelCase_ = depth_divisor
UpperCamelCase_ = kernel_sizes
UpperCamelCase_ = in_channels
UpperCamelCase_ = out_channels
UpperCamelCase_ = depthwise_padding
UpperCamelCase_ = strides
UpperCamelCase_ = num_block_repeats
UpperCamelCase_ = expand_ratios
UpperCamelCase_ = squeeze_expansion_ratio
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dim
UpperCamelCase_ = pooling_type
UpperCamelCase_ = initializer_range
UpperCamelCase_ = batch_norm_eps
UpperCamelCase_ = batch_norm_momentum
UpperCamelCase_ = drop_connect_rate
UpperCamelCase_ = sum(__UpperCamelCase ) * 4
@classmethod
def lowerCamelCase_ ( cls , __UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
cls._set_token_in_kwargs(__UpperCamelCase )
UpperCamelCase_ , UpperCamelCase_ = cls.get_config_dict(__UpperCamelCase , **__UpperCamelCase )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get("""model_type""" ) == "align":
UpperCamelCase_ = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__UpperCamelCase , **__UpperCamelCase )
class lowercase_ ( __SCREAMING_SNAKE_CASE ):
A__ : Tuple = """align"""
A__ : int = True
def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=6_4_0 , __UpperCamelCase=1.0 , __UpperCamelCase=0.02 , **__UpperCamelCase , ):
"""simple docstring"""
super().__init__(**__UpperCamelCase )
if text_config is None:
UpperCamelCase_ = {}
logger.info("""text_config is None. Initializing the AlignTextConfig with default values.""" )
if vision_config is None:
UpperCamelCase_ = {}
logger.info("""vision_config is None. Initializing the AlignVisionConfig with default values.""" )
UpperCamelCase_ = AlignTextConfig(**__UpperCamelCase )
UpperCamelCase_ = AlignVisionConfig(**__UpperCamelCase )
UpperCamelCase_ = projection_dim
UpperCamelCase_ = temperature_init_value
UpperCamelCase_ = initializer_range
@classmethod
def lowerCamelCase_ ( cls , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__UpperCamelCase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = copy.deepcopy(self.__dict__ )
UpperCamelCase_ = self.text_config.to_dict()
UpperCamelCase_ = self.vision_config.to_dict()
UpperCamelCase_ = self.__class__.model_type
return output
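A hedged sketch of composing the combined config from the two sub-configs via the classmethod defined above. Upstream these classes are AlignTextConfig, AlignVisionConfig and AlignConfig; the sketch uses those names because the obfuscated aliases in this file shadow one another:

from transformers import AlignConfig, AlignTextConfig, AlignVisionConfig

text_config = AlignTextConfig()
vision_config = AlignVisionConfig()
config = AlignConfig.from_text_vision_configs(text_config , vision_config , projection_dim=640 )
print(config.projection_dim )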
| 261
| 0
|
INSTALL_CONTENT = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 175
|
"""simple docstring"""
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow C++ logging (assumed: this is the only use of the `os` import)
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
| 172
| 0
|
from __future__ import annotations
END = '''#'''


class Trie:
    '''simple docstring'''

    def __init__( self ):
        '''simple docstring'''
        self._trie : dict = {}

    def insert_word( self , text : str ):
        '''simple docstring'''
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word( self , prefix : str ):
        '''simple docstring'''
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie )

    def _elements( self , d : dict ):
        '''simple docstring'''
        result = []
        for c, v in d.items():
            sub_result = [''' '''] if c == END else [(c + s) for s in self._elements(v )]
            result.extend(sub_result )
        return tuple(result )


trie = Trie()
words = ('''depart''', '''detergent''', '''daring''', '''dog''', '''deer''', '''deal''')
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string : str ) -> tuple:
    suffixes = trie.find_word(string )
    return tuple(string + word for word in suffixes )


def main() -> None:
    print(autocomplete_using_trie('de' ) )


if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
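Expected behaviour of the demo above: completing 'de' returns every stored word that extends it, each carrying the trailing space emitted for the END sentinel (dict insertion order is preserved in Python 3.7+):

assert autocomplete_using_trie('de' ) == ('depart ', 'detergent ', 'deer ', 'deal ')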
| 354
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __lowercase ( a__ ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2]
__SCREAMING_SNAKE_CASE = True if 'large' in model_name or 'huge' in model_name else False
__SCREAMING_SNAKE_CASE = True if 'large' in model_name or 'huge' in model_name else False
__SCREAMING_SNAKE_CASE = True if 'large' in model_name or 'huge' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
__SCREAMING_SNAKE_CASE = [3, 3, 3, 3]
__SCREAMING_SNAKE_CASE = [5, 5, 5, 5]
elif "fl4" in model_name:
__SCREAMING_SNAKE_CASE = [4, 4, 4, 4]
__SCREAMING_SNAKE_CASE = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
__SCREAMING_SNAKE_CASE = [3, 3, 3, 3]
if "lrf" in model_name:
__SCREAMING_SNAKE_CASE = [3, 3, 3, 3]
else:
__SCREAMING_SNAKE_CASE = [2, 2, 2, 2]
if "tiny" in model_name:
__SCREAMING_SNAKE_CASE = 96
elif "small" in model_name:
__SCREAMING_SNAKE_CASE = 96
elif "base" in model_name:
__SCREAMING_SNAKE_CASE = 1_28
elif "large" in model_name:
__SCREAMING_SNAKE_CASE = 1_92
elif "xlarge" in model_name:
__SCREAMING_SNAKE_CASE = 2_56
elif "huge" in model_name:
__SCREAMING_SNAKE_CASE = 3_52
# set label information
__SCREAMING_SNAKE_CASE = 'huggingface/label-files'
if "large" in model_name or "huge" in model_name:
__SCREAMING_SNAKE_CASE = 'imagenet-22k-id2label.json'
else:
__SCREAMING_SNAKE_CASE = 'imagenet-1k-id2label.json'
__SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(a__ , a__ , repo_type='dataset' ) , 'r' ) )
__SCREAMING_SNAKE_CASE = {int(a__ ): v for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE = FocalNetConfig(
embed_dim=a__ , depths=a__ , focal_levels=a__ , focal_windows=a__ , use_conv_embed=a__ , idalabel=a__ , labelaid=a__ , use_post_layernorm=a__ , use_layerscale=a__ , )
return config
def __lowercase ( a__ ) -> Any:
if "patch_embed.proj" in name:
__SCREAMING_SNAKE_CASE = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__SCREAMING_SNAKE_CASE = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
__SCREAMING_SNAKE_CASE = 'encoder.' + name
if "encoder.layers" in name:
__SCREAMING_SNAKE_CASE = name.replace('encoder.layers' , 'encoder.stages' )
if "downsample.proj" in name:
__SCREAMING_SNAKE_CASE = name.replace('downsample.proj' , 'downsample.projection' )
if "blocks" in name:
__SCREAMING_SNAKE_CASE = name.replace('blocks' , 'layers' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
__SCREAMING_SNAKE_CASE = name.replace('modulation.f' , 'modulation.projection_in' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
__SCREAMING_SNAKE_CASE = name.replace('modulation.h' , 'modulation.projection_context' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
__SCREAMING_SNAKE_CASE = name.replace('modulation.proj' , 'modulation.projection_out' )
if name == "norm.weight":
__SCREAMING_SNAKE_CASE = 'layernorm.weight'
if name == "norm.bias":
__SCREAMING_SNAKE_CASE = 'layernorm.bias'
if "head" in name:
__SCREAMING_SNAKE_CASE = name.replace('head' , 'classifier' )
else:
__SCREAMING_SNAKE_CASE = 'focalnet.' + name
return name
def __lowercase ( a__ , a__ , a__=False ) -> Dict:
# fmt: off
__SCREAMING_SNAKE_CASE = {
'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
}
# fmt: on
__SCREAMING_SNAKE_CASE = model_name_to_url[model_name]
print('Checkpoint URL: ' , a__ )
__SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(a__ , map_location='cpu' )['model']
# rename keys
for key in state_dict.copy().keys():
__SCREAMING_SNAKE_CASE = state_dict.pop(a__ )
__SCREAMING_SNAKE_CASE = val
__SCREAMING_SNAKE_CASE = get_focalnet_config(a__ )
__SCREAMING_SNAKE_CASE = FocalNetForImageClassification(a__ )
model.eval()
# load state dict
model.load_state_dict(a__ )
# verify conversion
__SCREAMING_SNAKE_CASE = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__SCREAMING_SNAKE_CASE = BitImageProcessor(
do_resize=a__ , size={'shortest_edge': 2_56} , resample=PILImageResampling.BILINEAR , do_center_crop=a__ , crop_size=2_24 , do_normalize=a__ , image_mean=a__ , image_std=a__ , )
__SCREAMING_SNAKE_CASE = Image.open(requests.get(a__ , stream=a__ ).raw )
__SCREAMING_SNAKE_CASE = processor(images=a__ , return_tensors='pt' )
__SCREAMING_SNAKE_CASE = transforms.Compose(
[
transforms.Resize(2_56 ),
transforms.CenterCrop(2_24 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
__SCREAMING_SNAKE_CASE = image_transforms(a__ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , a__ , atol=1E-4 )
__SCREAMING_SNAKE_CASE = model(**a__ )
__SCREAMING_SNAKE_CASE = outputs.logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
print('First values of logits:' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
__SCREAMING_SNAKE_CASE = torch.tensor([0.2166, -0.4368, 0.2191] )
elif model_name == "focalnet-tiny-lrf":
__SCREAMING_SNAKE_CASE = torch.tensor([1.1669, 0.0125, -0.1695] )
elif model_name == "focalnet-small":
__SCREAMING_SNAKE_CASE = torch.tensor([0.4917, -0.0430, 0.1341] )
elif model_name == "focalnet-small-lrf":
__SCREAMING_SNAKE_CASE = torch.tensor([-0.2588, -0.5342, -0.2331] )
elif model_name == "focalnet-base":
__SCREAMING_SNAKE_CASE = torch.tensor([-0.1655, -0.4090, -0.1730] )
elif model_name == "focalnet-base-lrf":
__SCREAMING_SNAKE_CASE = torch.tensor([0.5306, -0.0483, -0.3928] )
assert torch.allclose(outputs.logits[0, :3] , a__ , atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(a__ )
processor.save_pretrained(a__ )
if push_to_hub:
print(f"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(f"""{model_name}""" )
processor.push_to_hub(f"""{model_name}""" )
if __name__ == "__main__":
lowerCAmelCase__ : int =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
lowerCAmelCase__ : List[Any] =parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
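A typical invocation of this conversion script; the script filename is an assumption, while the flags match the argparse definition above:

# python convert_focalnet_to_hf_format.py \
#     --model_name focalnet-tiny \
#     --pytorch_dump_folder_path ./focalnet-tiny-converted \
#     --push_to_hub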
| 118
| 0
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
A : str = []
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
F'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
F'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
F'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
F'stage{idx}.patch_embed.norm.bias',
) )
return embed
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
A : Optional[Any] = []
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
F'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
F'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
F'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
F'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', F'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', F'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', F'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', F'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', F'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', F'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', F'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', F'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
def cls_token(idx):
    """Helper to rename the cls_token weights of stage `idx`."""
    token = []
    token.append((F'cvt.encoder.stages.{idx}.cls_token', 'stage2.cls_token'))
    return token


def final():
    """Helper to rename the final layernorm and classification head weights."""
    head = []
    head.append(('layernorm.weight', 'norm.weight'))
    head.append(('layernorm.bias', 'norm.bias'))
    head.append(('classifier.weight', 'head.weight'))
    head.append(('classifier.bias', 'head.bias'))
    return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    """Convert a Microsoft CvT checkpoint to the HuggingFace format."""
    img_labels_file = 'imagenet-1k-id2label.json'
    num_labels = 1000
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type='dataset')), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit('/', 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit('/', 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k')
    image_processor.size['shortest_edge'] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device('cpu'))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
    parser.add_argument(
        '--image_size',
        default=384,
        type=int,
        help='Input image size.',
    )
    parser.add_argument(
        '--cvt_file_name',
        default=R'cvtmodels\CvT-w24-384x384-IN-22k.pth',
        type=str,
        help='Path to the original cvt checkpoint file.',
    )
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
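# Example invocation (an added sketch; the script filename is assumed, and the
# checkpoint must first be downloaded from the zoo link above):
#   python convert_cvt_original_pytorch_checkpoint_to_pytorch.py \
#       --cvt_model cvt-w24 \
#       --image_size 384 \
#       --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
#       --pytorch_dump_folder_path ./cvt-w24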
| 3
|
"""simple docstring"""
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    pre_numerator = 1
    cur_numerator = 2

    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp

    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(f'{solution() = }')
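    # Sanity check (added; not in the original file): Project Euler 65 states
    # that the numerator of the 10th convergent of e is 1457, whose digit sum
    # is 1 + 4 + 5 + 7 = 17.
    assert solution(10) == 17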
| 86
| 0
|
"""simple docstring"""
from sklearn.metrics import f1_score
import datasets
a = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""
a = """
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{'f1': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results['f1'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results['f1'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")
>>> print(round(results['f1'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'f1': array([0.8, 0. , 0. ])}
"""
a = """
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
        return {"f1": float(score) if score.size == 1 else score}
| 353
|
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_py3nvml_available():
    import py3nvml.py3nvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    'Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.')
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):

    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('A device strategy has to be initialized before using TensorFlow.')
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('A device strategy has to be initialized before using TensorFlow.')
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int):
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('A device strategy has to be initialized before using TensorFlow.')
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('A device strategy has to be initialized before using TensorFlow.')
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int):
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError('Mixed precision is currently not supported.')

        has_model_class_in_config = (
            hasattr(config, 'architectures')
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = 'TF' + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__('transformers', fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    F'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
                    ' set `--only_pretrain_model` or `args.only_pretrain_model=True`.')
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, 'vocab_size') else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int):
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError('Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.')

        if self.args.fp16:
            raise NotImplementedError('Mixed precision is currently not supported.')

        has_model_class_in_config = (
            hasattr(config, 'architectures')
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = 'TF' + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__('transformers', fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    F'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
                    ' set `--only_pretrain_model` or `args.only_pretrain_model=True`.')
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, 'vocab_size') else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run an additional 5 times to stabilize compilation for tpu
                    logger.info('Do inference on TPU. Running model 5 times to stabilize compilation')
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func, repeat=self.args.repeat, number=10, )
                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(F'''Doesn\'t fit on GPU. {e}''')
    def _measure_memory(self, func: Callable[[], None]):
        logger.info(
            'Note that TensorFlow allocates more memory than '
            'it might need to speed up computation. '
            'The memory reported here corresponds to the memory '
            'reported by `nvidia-smi`, which can vary depending '
            'on total available memory on the GPU that is used.')
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            '`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'
                            ' consumption line by line.')
                    trace = start_memory_tracing('transformers')

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        'Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'
                        ' with `args.memory=False`')
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            'py3nvml not installed, we won\'t log GPU memory usage. '
                            'Install py3nvml (pip install py3nvml) to log information about GPU.')
                        memory = 'N/A'
                    else:
                        logger.info(
                            'Measuring total GPU usage on GPU device. Make sure to not have additional processes'
                            ' running on the same GPU.')
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            'When enabling line by line tracing, the max peak memory for CPU is inaccurate in'
                            ' TensorFlow.')
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(F'''Doesn\'t fit on GPU. {e}''')
                return "N/A", None
| 271
| 0
|
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """
    image: is a grayscale PIL image object
    """
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open('path_to_image').convert('L'))
    image.save('output_image_path')
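    # Self-check sketch (added): on a 2x2 grayscale image with values
    # [10, 20, 200, 250] the integer mean is 120, so thresholding yields
    # [0, 0, 255, 255].
    example = Image.new('L', (2, 2))
    example.putdata([10, 20, 200, 250])
    assert list(mean_threshold(example).getdata()) == [0, 0, 255, 255]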
| 280
|
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """A small vector class built on top of Python lists."""

    def __init__(self, components=None):
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self):
        return len(self.__components)

    def __str__(self):
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other):
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception('must have the same size')

    def __sub__(self, other):
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception('must have the same size')

    @overload
    def __mul__(self, other: float) -> "Vector":
        ...

    @overload
    def __mul__(self, other: "Vector") -> float:
        ...

    def __mul__(self, other):
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception('invalid operand!')

    def copy(self):
        return Vector(self.__components)

    def component(self, i):
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception('index out of range')

    def change_component(self, pos, value):
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self):
        if len(self.__components) == 0:
            raise Exception('Vector is empty')
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other, deg=False):
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector(dimension: int) -> Vector:
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
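# Usage sketch (added; not part of the original library): dot product via
# __mul__, scalar multiplication, and Euclidean length.
#
#     v = Vector([1, 2, 3])
#     w = Vector([4, 5, 6])
#     assert v * w == 32                      # 1*4 + 2*5 + 3*6
#     assert str(v * 2) == "(2,4,6)"
#     assert Vector([3, 4]).euclidean_length() == 5.0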
class Matrix:
    """A simple matrix class with basic linear algebra operations."""

    def __init__(self, matrix, w, h):
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self):
        ans = ''
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other):
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception('matrix must have the same dimension!')

    def __sub__(self, other):
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception('matrices must have the same dimension!')

    @overload
    def __mul__(self, other: float) -> "Matrix":
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other):
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    'vector must have the same size as the '
                    'number of columns of the matrix!')
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self):
        return self.__height

    def width(self):
        return self.__width

    def component(self, x, y):
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception('change_component: indices out of bounds')

    def change_component(self, x, y, value):
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception('change_component: indices out of bounds')

    def minor(self, x, y):
        if self.__height != self.__width:
            raise Exception('Matrix is not square')
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x, y):
        if self.__height != self.__width:
            raise Exception('Matrix is not square')
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception('Indices out of bounds')

    def determinant(self):
        if self.__height != self.__width:
            raise Exception('Matrix is not square')
        if self.__height < 1:
            raise Exception('Matrix has no element')
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
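# Usage sketch (added): a 2x2 determinant and a matrix-vector product.
#
#     m = Matrix([[1, 2], [3, 4]], 2, 2)
#     assert m.determinant() == 1 * 4 - 2 * 3 == -2
#     assert str(m * Vector([1, 1])) == "(3,7)"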
| 280
| 1
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _UpperCamelCase ( metaclass=lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase__ = ["""flax"""]
def __init__( self : Tuple , *_lowerCAmelCase : str , **_lowerCAmelCase : List[Any]):
'''simple docstring'''
requires_backends(self , ['flax'])
@classmethod
def __lowerCamelCase ( cls : Any , *_lowerCAmelCase : Dict , **_lowerCAmelCase : Tuple):
'''simple docstring'''
requires_backends(cls , ['flax'])
@classmethod
def __lowerCamelCase ( cls : Any , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : int):
'''simple docstring'''
requires_backends(cls , ['flax'])
class _UpperCamelCase ( metaclass=lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase__ = ["""flax"""]
def __init__( self : Any , *_lowerCAmelCase : str , **_lowerCAmelCase : List[str]):
'''simple docstring'''
requires_backends(self , ['flax'])
@classmethod
def __lowerCamelCase ( cls : Dict , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : Tuple):
'''simple docstring'''
requires_backends(cls , ['flax'])
@classmethod
def __lowerCamelCase ( cls : Any , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : Tuple):
'''simple docstring'''
requires_backends(cls , ['flax'])
class _UpperCamelCase ( metaclass=lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase__ = ["""flax"""]
def __init__( self : List[str] , *_lowerCAmelCase : str , **_lowerCAmelCase : int):
'''simple docstring'''
requires_backends(self , ['flax'])
@classmethod
def __lowerCamelCase ( cls : List[str] , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : Optional[Any]):
'''simple docstring'''
requires_backends(cls , ['flax'])
@classmethod
def __lowerCamelCase ( cls : int , *_lowerCAmelCase : Any , **_lowerCAmelCase : Any):
'''simple docstring'''
requires_backends(cls , ['flax'])
class _UpperCamelCase ( metaclass=lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase__ = ["""flax"""]
def __init__( self : Optional[int] , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : List[Any]):
'''simple docstring'''
requires_backends(self , ['flax'])
@classmethod
def __lowerCamelCase ( cls : str , *_lowerCAmelCase : str , **_lowerCAmelCase : Any):
'''simple docstring'''
requires_backends(cls , ['flax'])
@classmethod
def __lowerCamelCase ( cls : Tuple , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : List[str]):
'''simple docstring'''
requires_backends(cls , ['flax'])
class _UpperCamelCase ( metaclass=lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase__ = ["""flax"""]
def __init__( self : List[str] , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : Tuple):
'''simple docstring'''
requires_backends(self , ['flax'])
@classmethod
def __lowerCamelCase ( cls : Union[str, Any] , *_lowerCAmelCase : Dict , **_lowerCAmelCase : Optional[int]):
'''simple docstring'''
requires_backends(cls , ['flax'])
@classmethod
def __lowerCamelCase ( cls : Optional[Any] , *_lowerCAmelCase : Any , **_lowerCAmelCase : List[str]):
'''simple docstring'''
requires_backends(cls , ['flax'])
class _UpperCamelCase ( metaclass=lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase__ = ["""flax"""]
def __init__( self : List[str] , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : Tuple):
'''simple docstring'''
requires_backends(self , ['flax'])
@classmethod
def __lowerCamelCase ( cls : Optional[int] , *_lowerCAmelCase : Any , **_lowerCAmelCase : Any):
'''simple docstring'''
requires_backends(cls , ['flax'])
@classmethod
def __lowerCamelCase ( cls : Union[str, Any] , *_lowerCAmelCase : Any , **_lowerCAmelCase : str):
'''simple docstring'''
requires_backends(cls , ['flax'])
class _UpperCamelCase ( metaclass=lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase__ = ["""flax"""]
def __init__( self : str , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : List[str]):
'''simple docstring'''
requires_backends(self , ['flax'])
@classmethod
def __lowerCamelCase ( cls : int , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : Dict):
'''simple docstring'''
requires_backends(cls , ['flax'])
@classmethod
def __lowerCamelCase ( cls : Tuple , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Union[str, Any]):
'''simple docstring'''
requires_backends(cls , ['flax'])
class _UpperCamelCase ( metaclass=lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase__ = ["""flax"""]
def __init__( self : Optional[int] , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : List[Any]):
'''simple docstring'''
requires_backends(self , ['flax'])
@classmethod
def __lowerCamelCase ( cls : Any , *_lowerCAmelCase : str , **_lowerCAmelCase : Optional[int]):
'''simple docstring'''
requires_backends(cls , ['flax'])
@classmethod
def __lowerCamelCase ( cls : Optional[int] , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : Tuple):
'''simple docstring'''
requires_backends(cls , ['flax'])
class _UpperCamelCase ( metaclass=lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase__ = ["""flax"""]
def __init__( self : Tuple , *_lowerCAmelCase : str , **_lowerCAmelCase : Tuple):
'''simple docstring'''
requires_backends(self , ['flax'])
@classmethod
def __lowerCamelCase ( cls : Union[str, Any] , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : Tuple):
'''simple docstring'''
requires_backends(cls , ['flax'])
@classmethod
def __lowerCamelCase ( cls : Dict , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : Optional[int]):
'''simple docstring'''
requires_backends(cls , ['flax'])
class _UpperCamelCase ( metaclass=lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase__ = ["""flax"""]
def __init__( self : Any , *_lowerCAmelCase : Any , **_lowerCAmelCase : List[str]):
'''simple docstring'''
requires_backends(self , ['flax'])
@classmethod
def __lowerCamelCase ( cls : str , *_lowerCAmelCase : Dict , **_lowerCAmelCase : Optional[Any]):
'''simple docstring'''
requires_backends(cls , ['flax'])
@classmethod
def __lowerCamelCase ( cls : Optional[int] , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : Optional[Any]):
'''simple docstring'''
requires_backends(cls , ['flax'])
class _UpperCamelCase ( metaclass=lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase__ = ["""flax"""]
def __init__( self : Union[str, Any] , *_lowerCAmelCase : Any , **_lowerCAmelCase : Optional[Any]):
'''simple docstring'''
requires_backends(self , ['flax'])
@classmethod
def __lowerCamelCase ( cls : Union[str, Any] , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : List[str]):
'''simple docstring'''
requires_backends(cls , ['flax'])
@classmethod
def __lowerCamelCase ( cls : Tuple , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : List[Any]):
'''simple docstring'''
requires_backends(cls , ['flax'])
class _UpperCamelCase ( metaclass=lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase__ = ["""flax"""]
def __init__( self : Tuple , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : Optional[Any]):
'''simple docstring'''
requires_backends(self , ['flax'])
@classmethod
def __lowerCamelCase ( cls : Dict , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : List[str]):
'''simple docstring'''
requires_backends(cls , ['flax'])
@classmethod
def __lowerCamelCase ( cls : Tuple , *_lowerCAmelCase : int , **_lowerCAmelCase : Tuple):
'''simple docstring'''
requires_backends(cls , ['flax'])
class _UpperCamelCase ( metaclass=lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase__ = ["""flax"""]
def __init__( self : List[Any] , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : Dict):
'''simple docstring'''
requires_backends(self , ['flax'])
@classmethod
def __lowerCamelCase ( cls : Dict , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Dict):
'''simple docstring'''
requires_backends(cls , ['flax'])
@classmethod
def __lowerCamelCase ( cls : List[Any] , *_lowerCAmelCase : str , **_lowerCAmelCase : Dict):
'''simple docstring'''
requires_backends(cls , ['flax'])
| 366
|
'''simple docstring'''
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"""https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"""
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top max_stories posts from HackerNews - https://news.ycombinator.com/"""
    url = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join('* [{title}]({url})'.format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 48
| 0
|
import mpmath # for roots of unity
import numpy as np
class FFT:
    """Fast polynomial multiplication via the fast Fourier transform."""

    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    # Discrete fourier transform of A or B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    # Multiply the DFTs of A and B and recover A*B via the inverse DFT
    def __multiply(self):
        dft_a = self.__dft('A')
        dft_b = self.__dft('B')
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2)
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root))
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    # Overwrite __str__ for print(); shows A, B and A*B
    def __str__(self):
        a = 'A = ' + ' + '.join(
            F"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = 'B = ' + ' + '.join(
            F"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = 'A*B = ' + ' + '.join(
            F"{coef}*x^{i}" for i, coef in enumerate(self.product))

        return F"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
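# Worked example (added): multiplying (1 + x) by (1 + x) should give
# 1 + 2x + x^2; the product coefficients come back as complex numbers
# rounded to 8 places, i.e. (1+0j), (2+0j), (1+0j).
#
#     print(FFT([1, 1], [1, 1]))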
| 73
|
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()

if is_torch_available():
    import torch


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16000, return_attention_mask=True, do_normalize=True, ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1E-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1E-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1E-3))
    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1E-4))
| 261
| 0
|
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """Calculate the missing one of inductance, frequency, or inductive reactance from the other two."""
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if inductance < 0:
raise ValueError('Inductance cannot be negative' )
if frequency < 0:
raise ValueError('Frequency cannot be negative' )
if reactance < 0:
raise ValueError('Inductive reactance cannot be negative' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
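# Worked example (added): a 5 mH inductor driven at 1 kHz has an inductive
# reactance of X_L = 2*pi*f*L = 2*pi * 1000 * 0.005 ≈ 31.42 ohm, so
# ind_reactance(0.005, 1000, 0) returns {'reactance': 31.41592653589793}.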
| 367
|
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Generate a random password from letters, digits and punctuation."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    #     random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase,
    # numbers, and special characters


def main():
    max_length = int(input('Please indicate the max length of your password: ').strip())
    chars_incl = input(
        'Please indicate the characters that must be in your password: ').strip()
    print('Password generated:', password_generator(max_length))
    print(
        'Alternative Password generated:', alternative_password_generator(chars_incl, max_length), )
    print('[If you are thinking of using this password, You better save it.]')


if __name__ == "__main__":
    main()
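# Quick self-check sketch (added): is_strong_password requires length >= 8
# plus at least one uppercase, lowercase, digit, and punctuation character.
#
#     assert is_strong_password("Ab1!Ab1!") is True
#     assert is_strong_password("abcdefgh") is False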
| 149
| 0
|
'''simple docstring'''
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
# A value less than 2 can cause an infinite loop in the algorithm.
if num < 2:
raise ValueError('''The input value cannot be less than 2''' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus
    for _ in range(attempts):
# These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
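# Worked example (added): even inputs short-circuit to 2, and for the classic
# odd composite 8051 = 83 * 97 the tortoise-and-hare loop recovers a
# nontrivial factor (which of the two depends on the seed/step used).
#
#     assert pollard_rho(100) == 2
#     assert pollard_rho(8051) in (83, 97)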
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
"""num""",
type=int,
help="""The value to find a divisor of""",
)
parser.add_argument(
"""--attempts""",
type=int,
default=3,
help="""The number of attempts before giving up""",
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F'{args.num} is probably prime')
else:
        quotient = args.num // divisor
print(F'{args.num} = {divisor} * {quotient}')
| 198
|
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculate the Gregorian Easter date for a given year."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday))


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
| 118
| 0
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin):
    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text: str, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
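# Minimal usage sketch (added; "openai/whisper-tiny" is the standard public
# checkpoint name, and `waveform` stands in for a 16 kHz audio array):
#
#     processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
#     inputs = processor(audio=waveform, sampling_rate=16000, return_tensors="pt")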
| 263
|
"""simple docstring"""
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__UpperCAmelCase , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(__UpperCAmelCase , 'num_attention_heads' ) )
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=64 , __UpperCAmelCase=3 , __UpperCAmelCase=3 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=16 , __UpperCAmelCase=[128, 256, 384] , __UpperCAmelCase=[4, 6, 8] , __UpperCAmelCase=[2, 3, 4] , __UpperCAmelCase=[16, 16, 16] , __UpperCAmelCase=0 , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=2 , ):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = image_size
__UpperCamelCase = num_channels
__UpperCamelCase = kernel_size
__UpperCamelCase = stride
__UpperCamelCase = padding
__UpperCamelCase = hidden_sizes
__UpperCamelCase = num_attention_heads
__UpperCamelCase = depths
__UpperCamelCase = key_dim
__UpperCamelCase = drop_path_rate
__UpperCamelCase = patch_size
__UpperCamelCase = attention_ratio
__UpperCamelCase = mlp_ratio
__UpperCamelCase = initializer_range
__UpperCamelCase = [
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
__UpperCamelCase = is_training
__UpperCamelCase = use_labels
__UpperCamelCase = num_labels
__UpperCamelCase = initializer_range
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels )
__UpperCamelCase = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self ):
'''simple docstring'''
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = LevitModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase )
__UpperCamelCase = (self.image_size, self.image_size)
__UpperCamelCase , __UpperCamelCase = image_size[0], image_size[1]
for _ in range(4 ):
__UpperCamelCase = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
__UpperCamelCase = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.num_labels
__UpperCamelCase = LevitForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
__UpperCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
lowercase = (
{
"feature-extraction": LevitModel,
"image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = LevitModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 )
def UpperCAmelCase ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self ):
'''simple docstring'''
return
@unittest.skip(reason='Levit does not use inputs_embeds' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Levit does not support input and output embeddings' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Levit does not output attentions' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(__UpperCAmelCase )
__UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase = [*signature.parameters.keys()]
__UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
__UpperCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
__UpperCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
__UpperCamelCase = outputs.hidden_states
__UpperCamelCase = len(self.model_tester.depths ) + 1
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
__UpperCamelCase = (self.model_tester.image_size, self.model_tester.image_size)
__UpperCamelCase , __UpperCamelCase = image_size[0], image_size[1]
for _ in range(4 ):
__UpperCamelCase = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
__UpperCamelCase = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCamelCase = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ):
'''simple docstring'''
__UpperCamelCase = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__UpperCAmelCase )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
__UpperCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__UpperCamelCase = model(**__UpperCAmelCase ).loss
loss.backward()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__UpperCamelCase = False
__UpperCamelCase = True
for model_class in self.all_model_classes:
if model_class in get_values(__UpperCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
__UpperCamelCase = model_class(__UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(__UpperCAmelCase )
model.train()
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__UpperCamelCase = model(**__UpperCAmelCase ).loss
loss.backward()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__UpperCAmelCase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'Testing {model_class} with {problem_type["title"]}' ):
__UpperCamelCase = problem_type['title']
__UpperCamelCase = problem_type['num_labels']
__UpperCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
if problem_type["num_labels"] > 1:
__UpperCamelCase = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] )
__UpperCamelCase = inputs['labels'].to(problem_type['dtype'] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__UpperCAmelCase ) as warning_list:
__UpperCamelCase = model(**__UpperCAmelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'Something is going wrong in the regression problem: intercepted {w.message}' )
loss.backward()
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = LevitModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def A ( ) -> Union[str, Any]:
__UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def UpperCAmelCase ( self ):
'''simple docstring'''
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__UpperCAmelCase )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
# verify the logits
__UpperCamelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor([1.0_4_4_8, -0.3_7_4_5, -1.8_3_1_7] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
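# Standalone version of the integration test above, using the same archive-list
# checkpoint (a sketch, not part of the test suite):
#
#   model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
#   image_processor = LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
#   inputs = image_processor(images=prepare_img(), return_tensors="pt")
#   with torch.no_grad():
#       print(model(**inputs).logits.shape)  # expected: torch.Size([1, 1000])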
| 263
| 1
|
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    """Mark a handler function with a single key code so the metaclass can register it."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """Mark a handler function with several key codes at once."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    """Metaclass that collects the marked methods of a class into a `key_handler` table."""

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Read one key and dispatch to the registered handler, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Apply the KeyHandler metaclass to an existing class."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
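# Usage sketch (the Menu class below is illustrative, not part of this module):
#
#   class Menu(metaclass=KeyHandler):
#       @mark(KEYMAP["up"])
#       def move_up(self):
#           print("cursor up")
#
#   Menu().handle_input()  # reads one keypress; dispatches to move_up on the up arrow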
| 84
|
'''simple docstring'''
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))
def init_hf_modules():
    # Nothing to do if HF_MODULES_CACHE is already on the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name: Union[str, os.PathLike]):
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def get_relative_import_files(module_file):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports
def check_imports(filename):
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    """Retrieve the (unique) pipeline class defined in a loaded module."""
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    # Download and cache `module_file` from the repo `pretrained_model_name_or_path`, or grab it if it's a local file.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every relative import file the module needs
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    # And lastly we get the class inside our newly created module
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
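# Illustrative call (the community pipeline and file names are assumptions, not
# from this module):
#
#   pipeline_cls = get_class_from_dynamic_module(
#       "clip_guided_stable_diffusion", "pipeline.py"
#   )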
| 271
| 0
|
import requests
_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="""<Your BBC News API key goes here>""")
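# Usage (the key is a placeholder; note that newsapi.org has reportedly retired the
# v1 endpoint used above in favor of /v2/top-headlines, so treat this as historical):
#
#   fetch_bbc_news(bbc_news_api_key="my-secret-api-key")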
| 352
|
"""simple docstring"""
import os
import string
import sys
lowercase__ = 1 << 8
lowercase__ = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
B"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
B"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Get raw characters from stdin (Windows or POSIX)."""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Get one key; arrow keys come back as chr(code + ARROW_KEY_FLAG)."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
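# Illustrative read loop (sketch; requires an interactive terminal):
#
#   while True:
#       ch = get_character()
#       if ch == chr(KEYMAP["up"]):
#           print("arrow up")
#       elif ch == chr(KEYMAP["interrupt"]):  # Ctrl-C
#           break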
| 12
| 0
|
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self, __a, __a=3, __a=32, __a=3, __a=10, __a=[10, 20, 30, 40], __a=[1, 1, 2, 1], __a=True, __a=True, __a="relu", __a=3, __a=None, ):
'''simple docstring'''
_lowerCAmelCase : str = parent
_lowerCAmelCase : List[str] = batch_size
_lowerCAmelCase : Optional[int] = image_size
_lowerCAmelCase : Optional[Any] = num_channels
_lowerCAmelCase : Union[str, Any] = embeddings_size
_lowerCAmelCase : Union[str, Any] = hidden_sizes
_lowerCAmelCase : Union[str, Any] = depths
_lowerCAmelCase : str = is_training
_lowerCAmelCase : Dict = use_labels
_lowerCAmelCase : Union[str, Any] = hidden_act
_lowerCAmelCase : Dict = num_labels
_lowerCAmelCase : Tuple = scope
_lowerCAmelCase : List[Any] = len(__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_lowerCAmelCase : str = self.get_config()
return config, pixel_values
def snake_case__ ( self):
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, )
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = FlaxRegNetModel(config=__a)
_lowerCAmelCase : str = model(__a)
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.num_labels
_lowerCAmelCase : Tuple = FlaxRegNetForImageClassification(config=__a)
_lowerCAmelCase : Union[str, Any] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase : int = config_and_inputs
_lowerCAmelCase : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class UpperCAmelCase_ ( a , unittest.TestCase):
lowerCamelCase__ = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = FlaxRegNetModelTester(self)
_lowerCAmelCase : Optional[Any] = ConfigTester(self, config_class=__a, has_text_modality=__a)
def snake_case__ ( self):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case__ ( self):
'''simple docstring'''
return
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a)
@unittest.skip(reason="RegNet does not use inputs_embeds")
def snake_case__ ( self):
'''simple docstring'''
pass
@unittest.skip(reason="RegNet does not support input and output embeddings")
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Tuple = model_class(__a)
_lowerCAmelCase : Any = inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Union[str, Any] = [*signature.parameters.keys()]
_lowerCAmelCase : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1], __a)
def snake_case__ ( self):
'''simple docstring'''
def check_hidden_states_output(__a, __a, __a):
_lowerCAmelCase : Any = model_class(__a)
_lowerCAmelCase : Tuple = model(**self._prepare_for_class(__a, __a))
_lowerCAmelCase : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCAmelCase : Dict = self.model_tester.num_stages
self.assertEqual(len(__a), expected_num_stages + 1)
_lowerCAmelCase , _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[int] = True
check_hidden_states_output(__a, __a, __a)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : Optional[Any] = True
check_hidden_states_output(__a, __a, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
_lowerCAmelCase : Tuple = self._prepare_for_class(__a, __a)
_lowerCAmelCase : List[str] = model_class(__a)
@jax.jit
def model_jitted(__a, **__a):
return model(pixel_values=__a, **__a)
with self.subTest("JIT Enabled"):
_lowerCAmelCase : Dict = model_jitted(**__a).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
_lowerCAmelCase : Optional[int] = model_jitted(**__a).to_tuple()
self.assertEqual(len(__a), len(__a))
for jitted_output, output in zip(__a, __a):
self.assertEqual(jitted_output.shape, output.shape)
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def snake_case__ ( self):
'''simple docstring'''
return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
_lowerCAmelCase : Dict = self.default_image_processor
_lowerCAmelCase : str = prepare_img()
_lowerCAmelCase : Tuple = image_processor(images=__a, return_tensors="np")
_lowerCAmelCase : str = model(**__a)
# verify the logits
_lowerCAmelCase : List[Any] = (1, 1000)
self.assertEqual(outputs.logits.shape, __a)
_lowerCAmelCase : Tuple = jnp.array([-0.4_180, -1.5_051, -3.4_836])
self.assertTrue(jnp.allclose(outputs.logits[0, :3], __a, atol=1E-4))
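# Standalone version of the integration check above (same public checkpoint):
#
#   model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   image_processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   inputs = image_processor(images=prepare_img(), return_tensors="np")
#   print(model(**inputs).logits.shape)  # expected: (1, 1000)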
| 36
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__)
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ) -> Any:
lowerCamelCase : Any = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
lowerCamelCase : Union[str, Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ) -> str:
for i in range(config.num_hidden_layers ):
if base_model:
lowerCamelCase : Optional[int] = ""
else:
lowerCamelCase : List[str] = "deit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase : List[str] = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
lowerCamelCase : Optional[int] = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase : List[Any] = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase : Any = in_proj_bias[: config.hidden_size]
lowerCamelCase : List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase : Optional[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase : List[str] = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase : List[Any] = in_proj_bias[-config.hidden_size :]
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> str:
lowerCamelCase : List[str] = dct.pop(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Any = val
def A ( ) -> List[str]:
lowerCamelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase : str = Image.open(requests.get(_SCREAMING_SNAKE_CASE ,stream=_SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
lowerCamelCase : Union[str, Any] = DeiTConfig()
# all deit models have fine-tuned heads
lowerCamelCase : Optional[int] = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
lowerCamelCase : Dict = 1000
lowerCamelCase : Tuple = "huggingface/label-files"
lowerCamelCase : List[str] = "imagenet-1k-id2label.json"
lowerCamelCase : List[Any] = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,repo_type="dataset" ) ,"r" ) )
lowerCamelCase : Optional[int] = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowerCamelCase : Tuple = idalabel
lowerCamelCase : str = {v: k for k, v in idalabel.items()}
lowerCamelCase : Dict = int(deit_name[-6:-4] )
lowerCamelCase : Optional[Any] = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("tiny" ):
lowerCamelCase : Optional[Any] = 192
lowerCamelCase : List[str] = 768
lowerCamelCase : Tuple = 12
lowerCamelCase : Optional[Any] = 3
elif deit_name[9:].startswith("small" ):
lowerCamelCase : str = 384
lowerCamelCase : Optional[Any] = 1536
lowerCamelCase : Dict = 12
lowerCamelCase : Optional[int] = 6
if deit_name[9:].startswith("base" ):
pass
elif deit_name[4:].startswith("large" ):
lowerCamelCase : str = 1024
lowerCamelCase : List[str] = 4096
lowerCamelCase : Any = 24
lowerCamelCase : Dict = 16
# load original model from timm
lowerCamelCase : List[Any] = timm.create_model(_SCREAMING_SNAKE_CASE ,pretrained=_SCREAMING_SNAKE_CASE )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCamelCase : Dict = timm_model.state_dict()
lowerCamelCase : Dict = create_rename_keys(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
read_in_q_k_v(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# load HuggingFace model
lowerCamelCase : Optional[Any] = DeiTForImageClassificationWithTeacher(_SCREAMING_SNAKE_CASE ).eval()
model.load_state_dict(_SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by DeiTImageProcessor
lowerCamelCase : Any = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
lowerCamelCase : Union[str, Any] = DeiTImageProcessor(size=_SCREAMING_SNAKE_CASE ,crop_size=config.image_size )
lowerCamelCase : str = image_processor(images=prepare_img() ,return_tensors="pt" )
lowerCamelCase : int = encoding["pixel_values"]
lowerCamelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Union[str, Any] = timm_model(_SCREAMING_SNAKE_CASE )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_SCREAMING_SNAKE_CASE ,outputs.logits ,atol=1e-3 )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(f'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
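# Example invocation (the script filename and dump path are assumptions):
#
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224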
| 48
| 0
|
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort `sequence` in place between indices `start` and `end` (inclusive)."""
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
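# Quick sanity check for the in-place sort above:
#
#   data = [5, 3, 1, 4, 2]
#   slowsort(data)
#   assert data == [1, 2, 3, 4, 5]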
| 364
|
import datasets
from .evaluate import evaluate
lowercase : Dict = '''\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
'''
lowercase : int = '''
This metric wraps the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
'''
lowercase : int = '''
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the CUAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
\'aupr\': Area Under the Precision-Recall curve
\'prec_at_80_recall\': Precision at 80% recall
\'prec_at_90_recall\': Precision at 90% recall
Examples:
>>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> cuad_metric = datasets.load_metric("cuad")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {
"id": datasets.Value("string" ),
"prediction_text": datasets.features.Sequence(datasets.Value("string" ) ),
},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://www.atticusprojectai.org/cuad"] , reference_urls=["https://www.atticusprojectai.org/cuad"] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
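# Note: the `datasets.load_metric` entry point shown in the docstring example has
# since been deprecated upstream; as far as I can tell the equivalent is
# `evaluate.load("cuad")` from the standalone `evaluate` library.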
| 36
| 0
|
"""simple docstring"""
def get_bound_value(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    """Return min_val when option is True, otherwise max_val (after validating the inputs)."""
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val
def get_avg(number_1: int, number_2: int) -> int:
    return int((number_1 + number_2) / 2)
def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")

    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")
def main() -> None:
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)
if __name__ == "__main__":
main()
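# Non-interactive sketch (skips main()'s input() prompts):
#
#   guess_the_number(1, 100, 42)  # prints the bisection trace, ending at 42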
| 69
|
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
        # Model for Image-classification mapping
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
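# --- Usage sketch (added for illustration, not part of the original module) ---
# A minimal example of how the auto classes above resolve a checkpoint to a
# concrete Flax architecture through the lazy mappings. Assumes `flax` is
# installed and the public "gpt2" checkpoint can be fetched from the Hub.
#
#   from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
#
#   tokenizer = AutoTokenizer.from_pretrained("gpt2")
#   model = FlaxAutoModelForCausalLM.from_pretrained("gpt2")  # -> FlaxGPT2LMHeadModel
#   inputs = tokenizer("Hello, my dog is", return_tensors="np")
#   logits = model(**inputs).logits  # shape (batch, seq_len, vocab_size)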
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'deit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'deit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'deit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'deit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'deit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'deit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'deit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'deit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'deit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'deit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    # verify the conversion on an image of two cats from the COCO dataset
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak the timm model's weights into our DeiT structure."""
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
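# Example invocation (added; the checkpoint name below is one of the public timm
# DeiT releases and is given only for illustration):
#
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224
#
# The dumped folder can then be reloaded with
# DeiTForImageClassificationWithTeacher.from_pretrained(<folder>).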
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
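# Usage sketch (added): how the tool is meant to be invoked. A minimal, hedged
# example -- it assumes the `vision` and `torch` backends are installed and that
# "photo.jpg" exists locally (both are assumptions for illustration):
#
#   from PIL import Image
#
#   tool = ImageCaptioningTool()            # processor/model are loaded lazily
#   caption = tool(Image.open("photo.jpg"))
#   print(caption)                          # e.g. "a dog sitting on the grass"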
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    """Heun's second-order discrete scheduler (k-diffusion style)."""

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(
        self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor]
    ) -> torch.FloatTensor:
        # scale by 1 / sqrt(sigma^2 + 1) to match the k-diffusion parameterization
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t
    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
        """Construct the noise schedule of Karras et al. (2022)."""
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas
    @property
    def state_in_first_order(self):
        return self.dt is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples
    def __len__(self):
        return self.config.num_train_timesteps
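# --- Worked example (added): the Karras et al. (2022) noise schedule that
# `_convert_to_karras` implements, reproduced standalone so it can be run
# without instantiating the scheduler. The sigma range below is illustrative.
def karras_sigmas_demo(sigma_min: float, sigma_max: float, n: int, rho: float = 7.0) -> np.ndarray:
    ramp = np.linspace(0, 1, n)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    # interpolate in 1/rho-space, then map back: this front-loads the large sigmas
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho


if __name__ == "__main__":
    print(karras_sigmas_demo(0.1, 10.0, 5))  # monotonically decreasing from 10.0 to 0.1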
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}
class MgpstrConfig(PretrainedConfig):
    """Configuration class for the MGP-STR scene-text-recognition model."""

    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
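# Quick check (added): constructing the config with a default and an override,
# mirroring how other HF configs are used. Purely illustrative.
if __name__ == "__main__":
    config = MgpstrConfig(max_token_length=32)
    print(config.model_type, config.hidden_size, config.max_token_length)  # mgp-str 768 32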
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images to feed the processor."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
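# What the processor under test does outside of unittest (added sketch; assumes
# the `vision` extra is installed and the public BLIP checkpoint is reachable):
#
#   from transformers import BlipProcessor
#
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   batch = processor(images=pil_image, text="a photo of", return_tensors="pt")
#   # -> keys: "pixel_values", "input_ids", "attention_mask"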
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
'bert-base-german-dbmdz-cased': 512,
'bert-base-german-dbmdz-uncased': 512,
'TurkuNLP/bert-base-finnish-cased-v1': 512,
'TurkuNLP/bert-base-finnish-uncased-v1': 512,
'wietsedv/bert-base-dutch-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
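# Worked example (added): the special-token layout the two methods above produce
# for a sentence pair -- [CLS] A [SEP] B [SEP] with segment ids 0/1. The ids used
# here are the well-known bert-base-uncased vocabulary ids, for illustration only.
if __name__ == "__main__":
    cls_id, sep_id = 101, 102                  # [CLS], [SEP] in bert-base-uncased
    token_ids_0, token_ids_1 = [7592], [2088]  # "hello", "world"
    input_ids = [cls_id] + token_ids_0 + [sep_id] + token_ids_1 + [sep_id]
    token_type_ids = [0] * (len(token_ids_0) + 2) + [1] * (len(token_ids_1) + 1)
    assert input_ids == [101, 7592, 102, 2088, 102]
    assert token_type_ids == [0, 0, 0, 1, 1]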
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BeitForImageClassification',
'BeitForMaskedImageModeling',
'BeitForSemanticSegmentation',
'BeitModel',
'BeitPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
'FlaxBeitForImageClassification',
'FlaxBeitForMaskedImageModeling',
'FlaxBeitModel',
'FlaxBeitPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
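# Behavior note with a tiny sketch (added): attribute access on the lazy module
# triggers the real import, so importing `transformers.models.beit` stays cheap
# until a symbol is actually touched. Assumes transformers is installed:
#
#   from transformers.models import beit
#   config = beit.BeitConfig()   # first attribute access imports configuration_beit
#   print(type(beit).__name__)   # "_LazyModule"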
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        # onnxruntime expects plain numpy inputs keyed by the graph's input names
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """Load an ONNX inference session with a given provider (defaults to CPU)."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str, None]] = None,
        revision: Optional[Union[str, None]] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
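# Usage sketch (added; assumes `onnxruntime` is installed and a local
# "model.onnx" exists -- the file name and the "sample" input name are
# placeholders, since ONNX inputs are keyed by whatever names the exported
# graph uses):
#
#   session = OnnxRuntimeModel.load_model("model.onnx", provider="CPUExecutionProvider")
#   wrapper = OnnxRuntimeModel(model=session, model_save_dir=Path("."))
#   outputs = wrapper(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))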
"""simple docstring"""
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def prepare_img():
    # verify the conversion on an image of two cats from the COCO dataset
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilenet_v1_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the TF checkpoint's weights into our MobileNetV1 structure."""
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='mobilenet_v1_1.0_224',
type=str,
help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.',
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
args = parser.parse_args()
convert_mobilenet_v1_checkpoint(
    args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
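# Example invocation (added; the checkpoint path is a placeholder for a
# TensorFlow MobileNetV1 release downloaded separately):
#
#   python convert_original_tf_checkpoint_to_pytorch.py \
#       --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224 \
#       --push_to_hub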
"""simple docstring"""
import socket
def main() -> None:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")
if __name__ == "__main__":
main()
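# Companion server sketch (added): a minimal sender this client can talk to.
# The port mirrors the client above; the served file name "File_to_send" is an
# assumption for illustration. Requires Python 3.8+ for the walrus operator.
def serve_file(filename: str = "File_to_send", port: int = 12312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, _addr = server.accept()
    conn.recv(1024)  # consume the client's greeting
    with open(filename, "rb") as in_file:
        while chunk := in_file.read(1024):
            conn.send(chunk)
    conn.close()  # closing signals EOF, which ends the client's recv loop
    server.close()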
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Generate a random password of the given length from letters, digits and punctuation."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Divide the remaining length roughly evenly between letters, digits and punctuation
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    """A password is strong if it is at least min_length characters long and mixes
    uppercase, lowercase, digits and special characters."""
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char


def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, you had better save it.]")
if __name__ == "__main__":
main()
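# Quick demonstration (added): generate a password and validate it with
# is_strong_password. No user input needed; purely illustrative, and note a
# random 12-character draw can occasionally still come out "weak".
if __name__ == "__main__":
    pw = password_generator(12)
    print(pw, "->", "strong" if is_strong_password(pw) else "weak")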
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase_ :
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=24, num_hidden_layers=2, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, scope=None, range_bbox=1000, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
        '''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, ):
        '''simple docstring'''
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, ):
        '''simple docstring'''
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        '''simple docstring'''
        return True
    def setUp(self):
        '''simple docstring'''
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_token_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    def test_for_question_answering(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        '''simple docstring'''
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)
        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)
        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device, )
        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 36
| 0
|
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=6 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , scope=None , range_bbox=1000 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config( self ):
        """simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ):
        """simple docstring"""
        model = LiltModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_token_classification( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ):
        """simple docstring"""
        model = LiltForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
return config, inputs_dict
@require_torch
class LiltModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        """simple docstring"""
        return True
    def setUp( self ):
        """simple docstring"""
        self.model_tester = LiltModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LiltConfig , hidden_size=37 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_token_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    def test_for_question_answering( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
@slow
class LiltModelIntegrationTest( unittest.TestCase ):
    def test_inference_no_head( self ):
        """simple docstring"""
        model = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(torch_device )
        input_ids = torch.tensor([[1, 2]] , device=torch_device )
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids , bbox=bbox )
        expected_shape = torch.Size([1, 2, 768] )
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=torch_device , )
        self.assertTrue(outputs.last_hidden_state.shape , expected_shape )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , expected_slice , atol=1e-3 ) )
| 50
|
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
a_ = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig( datasets.BuilderConfig ):
    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class Json( datasets.ArrowBasedBuilder ):
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info( self ):
        """simple docstring"""
        if self.config.block_size is not None:
            logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead' )
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.' )
        if self.config.newlines_in_values is not None:
            raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported' )
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager ):
        """simple docstring"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}" )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={'files': files} ) )
        return splits
    def _cast_table( self , pa_table: pa.Table ) -> pa.Table:
        """simple docstring"""
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features ) - set(pa_table.column_names ):
                type = self.config.features.arrow_schema.field(column_name ).type
                pa_table = pa_table.append_column(column_name , pa.array([None] * len(pa_table ) , type=type ) )
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.config.features.arrow_schema )
        return pa_table
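    # _cast_table appends any column declared in `features` but missing from the
    # parsed table as an all-null array before performing the stricter cast.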
    def _generate_tables( self , files ):
        """simple docstring"""
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                    dataset = json.load(f )
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset , (list, tuple) ):
                    keys = set().union(*[row.keys() for row in dataset] )
                    mapping = {col: [row.get(col ) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping )
                yield file_idx, self._cast_table(pa_table )
            # If the file has one json object per line
            else:
                with open(file , 'rb' ) as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32 , 16 << 10 )
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else 'strict'
                    )
                    while True:
                        batch = f.read(self.config.chunksize )
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f )
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding , errors=encoding_errors ).encode('utf-8' )
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch ) , read_options=paj.ReadOptions(block_size=block_size ) )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e , pa.ArrowInvalid )
                                        and "straddling" not in str(e )
                                        or block_size > len(batch )
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}." )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                                    dataset = json.load(f )
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e )}: {e}" )
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset , list ):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset] )
                                    mapping = {col: [row.get(col ) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping )
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e )}: {e}" )
                                    raise ValueError(f"Not able to read records in the JSON file at {file}." ) from None
                                yield file_idx, self._cast_table(pa_table )
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e )}: {e}" )
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys() ) )}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. " ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table )
                        batch_idx += 1
| 50
| 1
|
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    '''simple docstring'''
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
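# Because eval() resolves the symbol `x` from the enclosing scope, the function
# must be passed as a string written in the variable `x`,
# e.g. newton_raphson("x**2 - 5*x + 2", 0.4).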
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}''')
# Find value of e
print(F'''The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}''')
# Exponential Roots
print(F'''The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}''')
| 296
|
def solution(n: int = 10) -> str:
    '''simple docstring'''
    if not isinstance(n, int) or n < 0:
        raise ValueError("""Invalid input""")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)
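# pow(2, 7830457, modulus) performs fast modular exponentiation, so only the
# last n digits of 28433 * 2**7830457 + 1 are ever materialised.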
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'''{solution(1_0) = }''')
| 296
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = '▁'
SCREAMING_SNAKE_CASE__ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
SCREAMING_SNAKE_CASE__ = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
'tokenizer_file': {
'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
},
}
SCREAMING_SNAKE_CASE__ = {
'google/pegasus-xsum': 5_1_2,
}
class PegasusTokenizerFast( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file=None , tokenizer_file=None , pad_token="<pad>" , eos_token="</s>" , unk_token="<unk>" , mask_token="<mask_2>" , mask_token_sent="<mask_1>" , additional_special_tokens=None , offset=103 , **kwargs , ):
        '''simple docstring'''
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens , list ):
                raise TypeError(
                    f'additional_special_tokens should be of type {type(list )}, but is'
                    f' {type(additional_special_tokens )}' )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f'<unk_{i}>' for i in range(len(additional_special_tokens_extended ) , self.offset - 1 )
            ]
            if len(set(additional_special_tokens_extended ) ) != len(additional_special_tokens_extended ):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f'<unk_{i}>' for i in range(2 , self.offset )]
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , pad_token=pad_token , eos_token=eos_token , unk_token=unk_token , mask_token=mask_token , mask_token_sent=mask_token_sent , offset=offset , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask( self , seq ):
        '''simple docstring'''
        all_special_ids = set(self.all_special_ids )  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}' )
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_a )
        elif token_ids_b is None:
            return self._special_token_mask(token_ids_a ) + [1]
        else:
            return self._special_token_mask(token_ids_a + token_ids_b ) + [1]
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ) -> List[int]:
        '''simple docstring'''
        if token_ids_b is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_b + [self.eos_token_id]
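    # Pegasus adds only a trailing EOS (no BOS/CLS); the special-tokens mask
    # above appends a single final 1 to account for it.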
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
| 364
|
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """vocab.txt"""}
SCREAMING_SNAKE_CASE__ = {
"""vocab_file""": {
"""facebook/esm2_t6_8M_UR50D""": """https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt""",
"""facebook/esm2_t12_35M_UR50D""": """https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""facebook/esm2_t6_8M_UR50D""": 1_0_2_4,
"""facebook/esm2_t12_35M_UR50D""": 1_0_2_4,
}
def load_vocab_file(vocab_file ):
    '''simple docstring'''
    with open(vocab_file , "r" ) as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
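# Each non-empty line of vocab.txt is one token; a token's id is its line index.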
class EsmTokenizer( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , unk_token="<unk>" , cls_token="<cls>" , pad_token="<pad>" , mask_token="<mask>" , eos_token="<eos>" , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.all_tokens = load_vocab_file(vocab_file )
        self._id_to_token = dict(enumerate(self.all_tokens ) )
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens )}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens )
    def _convert_id_to_token( self , index ) -> str:
        '''simple docstring'''
        return self._id_to_token.get(index , self.unk_token )
    def _convert_token_to_id( self , token ) -> int:
        '''simple docstring'''
        return self._token_to_id.get(token , self._token_to_id.get(self.unk_token ) )
    def _tokenize( self , text , **kwargs ):
        '''simple docstring'''
        return text.split()
    def get_vocab_size( self , with_added_tokens=False ) -> int:
        '''simple docstring'''
        return len(self._id_to_token )
    def get_vocab( self ):
        '''simple docstring'''
        return {token: i for i, token in enumerate(self.all_tokens )}
    def token_to_id( self , token ) -> int:
        '''simple docstring'''
        return self._token_to_id.get(token , self._token_to_id.get(self.unk_token ) )
    def id_to_token( self , index ) -> str:
        '''simple docstring'''
        return self._id_to_token.get(index , self.unk_token )
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ) -> List[int]:
        '''simple docstring'''
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_b is None:
            if self.eos_token_id is None:
                return cls + token_ids_a
            else:
                return cls + token_ids_a + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!" )
        return cls + token_ids_a + sep + token_ids_b + sep  # Multiple inputs always have an EOS token
    def get_special_tokens_mask( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            if token_ids_b is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
        mask = [1] + ([0] * len(token_ids_a )) + [1]
        if token_ids_b is not None:
            mask += [0] * len(token_ids_b ) + [1]
        return mask
    def save_vocabulary( self , save_directory , filename_prefix ):
        '''simple docstring'''
        vocab_file = os.path.join(save_directory , (filename_prefix + "-" if filename_prefix else "") + "vocab.txt" )
        with open(vocab_file , "w" ) as f:
            f.write("\n".join(self.all_tokens ) )
        return (vocab_file,)
    @property
    def vocab_size( self ) -> int:
        '''simple docstring'''
        return self.get_vocab_size(with_added_tokens=False )
    def _add_tokens( self , new_tokens , special_tokens = False ) -> int:
        '''simple docstring'''
        return super()._add_tokens(new_tokens , special_tokens=special_tokens )
| 297
| 0
|
"""simple docstring"""
def solution(n: int = 100) -> int:
    '''simple docstring'''
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
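# Closed forms: sum(i) = n(n+1)/2 and sum(i**2) = n(n+1)(2n+1)/6; note that
# (n(n+1)/2)**2 is both the square of the sum and the sum of cubes.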
if __name__ == "__main__":
print(f'''{solution() = }''')
| 179
|
"""simple docstring"""
def climb_stairs(number_of_steps: int) -> int:
    '''simple docstring'''
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"""number_of_steps needs to be a positive integer, your input {number_of_steps}"""
    if number_of_steps == 1:
        return 1
    current, previous = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
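# Classic climbing-stairs recurrence: ways(n) = ways(n-1) + ways(n-2),
# evaluated iteratively in O(n) time and O(1) space.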
if __name__ == "__main__":
import doctest
doctest.testmod()
| 179
| 1
|
'''simple docstring'''
def jaro_winkler(stra: str, strb: str) -> float:
    def get_matched_characters(_stra: str, _strb: str) -> str:
        matched = []
        limit = min(len(_stra), len(_strb)) // 2
        for i, l in enumerate(_stra):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_strb)))
            if l in _strb[left:right]:
                matched.append(l)
                # blank out the matched character so it is not matched twice
                _strb = f"""{_strb[0:_strb.index(l)]} {_strb[_strb.index(l) + 1:]}"""
        return "".join(matched)

    # matching characters
    matching_a = get_matched_characters(stra, strb)
    matching_b = get_matched_characters(strb, stra)
    match_count = len(matching_a)

    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a, matching_b) if ca != cb]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra)
                + match_count / len(strb)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4], strb[:4]):
        if ca == cb:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
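# Reference values: jaro_winkler("martha", "marhta") ≈ 0.9611; identical strings
# score 1.0, and strings with no matching characters score 0.0.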
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
| 365
|
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
def __init__( self : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Optional[int]=2 , __snake_case : Union[str, Any]=8 , __snake_case : List[str]=True , __snake_case : Dict=True , __snake_case : Optional[Any]=True , __snake_case : List[str]=True , __snake_case : Tuple=99 , __snake_case : int=16 , __snake_case : Optional[int]=5 , __snake_case : int=2 , __snake_case : Tuple=36 , __snake_case : Optional[Any]="gelu" , __snake_case : str=0.0 , __snake_case : Optional[int]=0.0 , __snake_case : Tuple=5_12 , __snake_case : str=16 , __snake_case : str=2 , __snake_case : int=0.02 , __snake_case : Optional[int]=3 , __snake_case : List[Any]=4 , __snake_case : Any=None , ):
a : int = parent
a : Any = batch_size
a : Optional[int] = seq_length
a : List[str] = is_training
a : Dict = use_input_mask
a : Union[str, Any] = use_token_type_ids
a : Tuple = use_labels
a : Dict = vocab_size
a : Optional[int] = hidden_size
a : List[Any] = num_hidden_layers
a : Optional[Any] = num_attention_heads
a : str = intermediate_size
a : Dict = hidden_act
a : str = hidden_dropout_prob
a : Tuple = attention_probs_dropout_prob
a : Optional[Any] = max_position_embeddings
a : Tuple = type_vocab_size
a : int = type_sequence_label_size
a : List[Any] = initializer_range
a : List[str] = num_labels
a : List[str] = num_choices
a : Optional[Any] = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        return MraConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def get_pipeline_config( self ):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder( self ):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowercase_ ( self : int , __snake_case : int , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : Any ):
a : Dict = MraModel(config=__snake_case )
model.to(__snake_case )
model.eval()
a : str = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case )
a : List[str] = model(__snake_case , token_type_ids=__snake_case )
a : Union[str, Any] = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self : List[str] , __snake_case : Tuple , __snake_case : List[str] , __snake_case : str , __snake_case : Optional[Any] , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : Optional[Any] , __snake_case : Optional[int] , __snake_case : List[Any] , ):
a : Optional[Any] = True
a : Optional[int] = MraModel(__snake_case )
model.to(__snake_case )
model.eval()
a : List[Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , encoder_hidden_states=__snake_case , encoder_attention_mask=__snake_case , )
a : Any = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , encoder_hidden_states=__snake_case , )
a : Optional[int] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self : Optional[Any] , __snake_case : int , __snake_case : List[Any] , __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : Dict , __snake_case : Optional[Any] ):
a : Union[str, Any] = MraForMaskedLM(config=__snake_case )
model.to(__snake_case )
model.eval()
a : List[str] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self : Tuple , __snake_case : Union[str, Any] , __snake_case : str , __snake_case : Tuple , __snake_case : str , __snake_case : Tuple , __snake_case : Optional[int] , __snake_case : int ):
a : Optional[int] = MraForQuestionAnswering(config=__snake_case )
model.to(__snake_case )
model.eval()
a : Optional[int] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , start_positions=__snake_case , end_positions=__snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ ( self : Dict , __snake_case : Tuple , __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : Optional[Any] , __snake_case : Any , __snake_case : List[str] , __snake_case : str ):
a : Tuple = self.num_labels
a : Dict = MraForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
a : Any = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self : str , __snake_case : Dict , __snake_case : Optional[Any] , __snake_case : int , __snake_case : List[Any] , __snake_case : Any , __snake_case : List[Any] , __snake_case : int ):
a : Tuple = self.num_labels
a : Tuple = MraForTokenClassification(config=__snake_case )
model.to(__snake_case )
model.eval()
a : List[Any] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ ( self : Any , __snake_case : Any , __snake_case : str , __snake_case : Dict , __snake_case : List[Any] , __snake_case : Any , __snake_case : Any , __snake_case : str ):
a : Optional[int] = self.num_choices
a : int = MraForMultipleChoice(config=__snake_case )
model.to(__snake_case )
model.eval()
a : Tuple = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a : int = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class MraModelTest( ModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp( self ):
        self.model_tester = MraModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MraConfig , hidden_size=37 )
def lowercase_ ( self : List[str] ):
self.config_tester.run_common_tests()
def lowercase_ ( self : List[str] ):
a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def lowercase_ ( self : Any ):
a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a : Dict = type
self.model_tester.create_and_check_model(*__snake_case )
def lowercase_ ( self : List[Any] ):
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__snake_case )
def lowercase_ ( self : Optional[Any] ):
a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__snake_case )
def lowercase_ ( self : List[Any] ):
a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__snake_case )
def lowercase_ ( self : Tuple ):
a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__snake_case )
def lowercase_ ( self : Optional[Any] ):
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__snake_case )
@slow
def lowercase_ ( self : int ):
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Dict = MraModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@unittest.skip(reason='MRA does not output attentions' )
def lowercase_ ( self : Union[str, Any] ):
return
@require_torch
class MraModelIntegrationTest( unittest.TestCase ):
@slow
def lowercase_ ( self : Union[str, Any] ):
a : Union[str, Any] = MraModel.from_pretrained('uw-madison/mra-base-512-4' )
a : List[str] = torch.arange(2_56 ).unsqueeze(0 )
with torch.no_grad():
a : Optional[int] = model(__snake_case )[0]
a : Any = torch.Size((1, 2_56, 7_68) )
self.assertEqual(output.shape , __snake_case )
a : str = torch.tensor(
[[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __snake_case , atol=1e-4 ) )
@slow
def lowercase_ ( self : Optional[int] ):
a : Dict = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4' )
a : Optional[int] = torch.arange(2_56 ).unsqueeze(0 )
with torch.no_grad():
a : Dict = model(__snake_case )[0]
a : Union[str, Any] = 5_02_65
a : Dict = torch.Size((1, 2_56, vocab_size) )
self.assertEqual(output.shape , __snake_case )
a : Dict = torch.tensor(
[[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __snake_case , atol=1e-4 ) )
@slow
def lowercase_ ( self : Any ):
a : Dict = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3' )
a : Optional[int] = torch.arange(40_96 ).unsqueeze(0 )
with torch.no_grad():
a : Tuple = model(__snake_case )[0]
a : List[Any] = 5_02_65
a : str = torch.Size((1, 40_96, vocab_size) )
self.assertEqual(output.shape , __snake_case )
a : int = torch.tensor(
[[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __snake_case , atol=1e-4 ) )
| 96
| 0
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""google/pix2struct-textcaps-base""": (
"""https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"""
),
}
class PixaStructTextConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__( self , vocab_size=50_244 , hidden_size=768 , d_kv=64 , d_ff=2_048 , num_layers=12 , num_heads=12 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1e-6 , initializer_factor=1.0 , dense_act_fn="gelu_new" , decoder_start_token_id=0 , use_cache=False , pad_token_id=0 , eos_token_id=1 , tie_word_embeddings=False , is_decoder=True , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , tie_word_embeddings=tie_word_embeddings , is_decoder=is_decoder , **kwargs , )
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ):
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get('model_type' ) == "pix2struct":
            config_dict = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class PixaStructVisionConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "pix2struct_vision_model"
    def __init__( self , hidden_size=768 , patch_embed_hidden_size=768 , d_ff=2_048 , d_kv=64 , num_hidden_layers=12 , num_attention_heads=12 , dense_act_fn="gelu_new" , layer_norm_eps=1e-6 , dropout_rate=0.0 , attention_dropout=0.0 , initializer_range=1e-10 , initializer_factor=1.0 , seq_len=4_096 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ):
        """simple docstring"""
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get('model_type' ) == "pix2struct":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class PixaStructConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "pix2struct"
    is_composition = True
    def __init__( self , text_config=None , vision_config=None , initializer_factor=1.0 , initializer_range=0.02 , is_vqa=False , tie_word_embeddings=False , is_encoder_decoder=True , **kwargs , ):
        """simple docstring"""
        super().__init__(tie_word_embeddings=tie_word_embeddings , is_encoder_decoder=is_encoder_decoder , **kwargs )
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.' )
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.' )
        self.text_config = PixaStructTextConfig(**text_config )
        self.vision_config = PixaStructVisionConfig(**vision_config )
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa
    @classmethod
    def from_text_vision_configs( cls , text_config: PixaStructTextConfig , vision_config: PixaStructVisionConfig , **kwargs ):
        """simple docstring"""
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict( self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output['text_config'] = self.text_config.to_dict()
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
| 126
|
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class A_ ( A__ , unittest.TestCase ):
"""simple docstring"""
tokenizer_class = AlbertTokenizer
rust_tokenizer_class = AlbertTokenizerFast
test_rust_tokenizer = True
test_sentencepiece = True
test_sentencepiece_ignore_case = True
def setUp( self ):
    """simple docstring"""
    super().setUp()
    # We have a SentencePiece fixture for testing
    tokenizer = AlbertTokenizer(SAMPLE_VOCAB )
    tokenizer.save_pretrained(self.tmpdirname )
def get_input_output_texts( self , tokenizer ):
    """simple docstring"""
    input_text = 'this is a test'
    output_text = 'this is a test'
    return input_text, output_text
def test_convert_token_and_id( self ):
    """simple docstring"""
    token = '<pad>'
    token_id = 0
    self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
    self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def test_get_vocab( self ):
    """simple docstring"""
    vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
    self.assertEqual(vocab_keys[0] , '<pad>' )
    self.assertEqual(vocab_keys[1] , '<unk>' )
    self.assertEqual(vocab_keys[-1] , '▁eloquent' )
    self.assertEqual(len(vocab_keys ) , 30_000 )
def test_vocab_size( self ):
    """simple docstring"""
    self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def test_rust_and_python_full_tokenizers( self ):
    """simple docstring"""
    if not self.test_rust_tokenizer:
        return
    tokenizer = self.get_tokenizer()
    rust_tokenizer = self.get_rust_tokenizer()
    sequence = 'I was born in 92000, and this is falsé.'
    tokens = tokenizer.tokenize(sequence )
    rust_tokens = rust_tokenizer.tokenize(sequence )
    self.assertListEqual(tokens , rust_tokens )
    ids = tokenizer.encode(sequence , add_special_tokens=False )
    rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
    self.assertListEqual(ids , rust_ids )
    rust_tokenizer = self.get_rust_tokenizer()
    ids = tokenizer.encode(sequence )
    rust_ids = rust_tokenizer.encode(sequence )
    self.assertListEqual(ids , rust_ids )
def test_full_tokenizer( self ):
    """simple docstring"""
    tokenizer = AlbertTokenizer(SAMPLE_VOCAB , keep_accents=True )
    tokens = tokenizer.tokenize('This is a test' )
    self.assertListEqual(tokens , ['▁this', '▁is', '▁a', '▁test'] )
    self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [48, 25, 21, 1_289] )
    tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
    self.assertListEqual(
        tokens , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'] )
    ids = tokenizer.convert_tokens_to_ids(tokens )
    self.assertListEqual(ids , [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] )
    back_tokens = tokenizer.convert_ids_to_tokens(ids )
    self.assertListEqual(
        back_tokens , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'] , )
def test_sequence_builders( self ):
    """simple docstring"""
    tokenizer = AlbertTokenizer(SAMPLE_VOCAB )
    text = tokenizer.encode('sequence builders' )
    text_a = tokenizer.encode('multi-sequence build' )
    encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
    encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def test_tokenizer_integration( self ):
    """simple docstring"""
lowerCamelCase__ : Dict ={'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ , model_name='albert-base-v2' , revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e' , )
| 126
| 1
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
pipeline_class = DiTPipeline
params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
required_optional_params = PipelineTesterMixin.required_optional_params - {
'latents',
'num_images_per_prompt',
'callback',
'callback_steps',
}
batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
A__ = False
def get_dummy_components( self ):
    '''simple docstring'''
    torch.manual_seed(0 )
    transformer = TransformeraDModel(
        sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=True , activation_fn='gelu-approximate' , num_embeds_ada_norm=1000 , norm_type='ada_norm_zero' , norm_elementwise_affine=False , )
    vae = AutoencoderKL()
    scheduler = DDIMScheduler()
    components = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
return components
def get_dummy_inputs( self , device , seed=0 ):
    '''simple docstring'''
    if str(device ).startswith('mps' ):
        generator = torch.manual_seed(seed )
    else:
        generator = torch.Generator(device=device ).manual_seed(seed )
    inputs = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def test_inference( self ):
    '''simple docstring'''
    device = 'cpu'
    components = self.get_dummy_components()
    pipe = self.pipeline_class(**components )
    pipe.to(device )
    pipe.set_progress_bar_config(disable=None )
    inputs = self.get_dummy_inputs(device )
    image = pipe(**inputs ).images
    image_slice = image[0, -3:, -3:, -1]
    self.assertEqual(image.shape , (1, 16, 16, 3) )
    expected_slice = np.array([0.29_46, 0.66_01, 0.43_29, 0.32_96, 0.41_44, 0.53_19, 0.72_73, 0.50_13, 0.44_57] )
    max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
    self.assertLessEqual(max_diff , 1e-3 )
def test_inference_batch_single_identical( self ):
    '''simple docstring'''
    self._test_inference_batch_single_identical(relax_max_difference=True , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def test_xformers_attention_forwardGenerator_pass( self ):
    '''simple docstring'''
    self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests( unittest.TestCase ):
def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_dit_256( self ):
    '''simple docstring'''
    generator = torch.manual_seed(0 )
    pipe = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
    pipe.to('cuda' )
    words = ['vase', 'umbrella', 'white shark', 'white wolf']
    ids = pipe.get_label_ids(words )
    images = pipe(ids , generator=generator , num_inference_steps=40 , output_type='np' ).images
    for word, image in zip(words , images ):
        expected_image = load_numpy(
f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" )
assert np.abs((expected_image - image).max() ) < 1e-2
def test_dit_512( self ):
    '''simple docstring'''
    pipe = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
    pipe.to('cuda' )
    words = ['vase', 'umbrella']
    ids = pipe.get_label_ids(words )
    generator = torch.manual_seed(0 )
    images = pipe(ids , generator=generator , num_inference_steps=25 , output_type='np' ).images
    for word, image in zip(words , images ):
        expected_image = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
f"/dit/{word}_512.npy" )
assert np.abs((expected_image - image).max() ) < 1e-1
| 114
|
'''simple docstring'''
def solution( length : int = 50 ) -> int:
    """simple docstring"""
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f'''{solution() = }''')
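# Illustrative check (added sketch, not part of the original solution): a row of
# length 5 admits 7 fillings with length-2 tiles, 3 with length-3 tiles and 2
# with length-4 tiles, so the DP above should give 7 + 3 + 2 = 12.
assert solution(5) == 12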
| 114
| 1
|
import base64
def baseaa_encode(string: str) -> bytes:
    # Encode a UTF-8 string as standard base64 bytes.
    return base64.b64encode(string.encode('utf-8' ) )
def baseaa_decode(encoded: bytes) -> str:
    # Invert baseaa_encode: decode base64 bytes back into a UTF-8 string.
    return base64.b64decode(encoded ).decode('utf-8' )
if __name__ == "__main__":
    test = """Hello World!"""
    encoded = baseaa_encode(test)
    print(encoded)
    decoded = baseaa_decode(encoded)
    print(decoded)
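# Round-trip sketch for the helpers above (added illustration; `sample` is an
# arbitrary value): decoding an encoding must reproduce the original string.
sample = "base64 round-trip"
assert baseaa_decode(baseaa_encode(sample)) == sample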
| 50
|
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence , [] , 0 )
def create_state_space_tree(sequence: list[Any] , current_subsequence: list[Any] , index: int) -> None:
    if index == len(sequence ):
        print(current_subsequence )
        return
    # branch 1: skip the element at `index`
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    # branch 2: keep the element at `index`
    current_subsequence.append(sequence[index] )
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.pop()
if __name__ == "__main__":
_UpperCAmelCase : list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["""A""", """B""", """C"""])
generate_all_subsequences(seq)
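# Added sketch (hypothetical helper, not in the original module): the recursion
# above branches twice per element (keep or skip), so a sequence of length n
# yields 2 ** n subsequences; [3, 1, 2, 4] therefore prints 16 lists.
def count_subsequences(sequence: list[Any]) -> int:
    return 2 ** len(sequence)
assert count_subsequences([3, 1, 2, 4]) == 16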
| 50
| 1
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class AutomaticSpeechRecognition( TaskTemplate ):
"""simple docstring"""
task: str = field(default='automatic-speech-recognition' , metadata={'include_in_asdict_even_if_is_default': True} )
input_schema: ClassVar[Features] = Features({'audio': Audio()} )
label_schema: ClassVar[Features] = Features({'transcription': Value('string' )} )
audio_column: str = "audio"
transcription_column: str = "transcription"
def align_with_features( self , features ):
    '''simple docstring'''
    if self.audio_column not in features:
        raise ValueError(F'Column {self.audio_column} is not present in features.' )
    if not isinstance(features[self.audio_column] , Audio ):
        raise ValueError(F'Column {self.audio_column} is not an Audio type.' )
    task_template = copy.deepcopy(self )
    input_schema = self.input_schema.copy()
    input_schema['audio'] = features[self.audio_column]
    task_template.__dict__['input_schema'] = input_schema
    return task_template
@property
def column_mapping( self ) -> Dict[str, str]:
'''simple docstring'''
return {self.audio_column: "audio", self.transcription_column: "transcription"}
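# Usage sketch (added illustration; the 16 kHz sampling rate is an arbitrary choice):
# features = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
# template = AutomaticSpeechRecognition().align_with_features(features)
# The returned template carries the dataset's concrete Audio feature in its input schema.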
| 282
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class A ( unittest.TestCase ):
"""simple docstring"""
def check_results_dict_not_empty( self , results ):
    '''simple docstring'''
    for model_result in results.values():
        for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
            result = model_result['result'][batch_size][sequence_length]
            self.assertIsNotNone(result )
def snake_case__ ( self : Dict )-> List[str]:
'''simple docstring'''
A__ = 'sshleifer/tiny-gpt2'
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,)
A__ = PyTorchBenchmark(lowercase_ )
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case__ ( self : Dict )-> List[str]:
'''simple docstring'''
A__ = 'sgugger/tiny-distilbert-classification'
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,only_pretrain_model=lowercase_,)
A__ = PyTorchBenchmark(lowercase_ )
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case__ ( self : List[Any] )-> Any:
'''simple docstring'''
A__ = 'sshleifer/tiny-gpt2'
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,torchscript=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,)
A__ = PyTorchBenchmark(lowercase_ )
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == 'cpu','Cant do half precision' )
def snake_case__ ( self : Any )-> Dict:
'''simple docstring'''
A__ = 'sshleifer/tiny-gpt2'
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,fpaa=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,)
A__ = PyTorchBenchmark(lowercase_ )
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case__ ( self : Any )-> Optional[Any]:
'''simple docstring'''
A__ = 'sshleifer/tiny-gpt2'
A__ = AutoConfig.from_pretrained(lowercase_ )
# set architectures equal to `None`
A__ = None
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,)
A__ = PyTorchBenchmark(lowercase_,configs=[config] )
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case__ ( self : Union[str, Any] )-> int:
'''simple docstring'''
A__ = 'sshleifer/tiny-gpt2'
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,)
A__ = PyTorchBenchmark(lowercase_ )
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == 'cpu','Can\'t do half precision' )
def snake_case__ ( self : List[Any] )-> Dict:
'''simple docstring'''
A__ = 'sshleifer/tiny-gpt2'
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],fpaa=lowercase_,multi_process=lowercase_,)
A__ = PyTorchBenchmark(lowercase_ )
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def snake_case__ ( self : int )-> Optional[int]:
'''simple docstring'''
A__ = 'sshleifer/tiny-gpt2'
A__ = AutoConfig.from_pretrained(lowercase_ )
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,)
A__ = PyTorchBenchmark(lowercase_,configs=[config] )
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case__ ( self : List[Any] )-> Any:
'''simple docstring'''
A__ = 'sshleifer/tinier_bart'
A__ = AutoConfig.from_pretrained(lowercase_ )
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,)
A__ = PyTorchBenchmark(lowercase_,configs=[config] )
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case__ ( self : List[str] )-> List[str]:
'''simple docstring'''
A__ = 'sshleifer/tiny-gpt2'
A__ = AutoConfig.from_pretrained(lowercase_ )
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,)
A__ = PyTorchBenchmark(lowercase_,configs=[config] )
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def snake_case__ ( self : int )-> Union[str, Any]:
'''simple docstring'''
A__ = 'sshleifer/tinier_bart'
A__ = AutoConfig.from_pretrained(lowercase_ )
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,)
A__ = PyTorchBenchmark(lowercase_,configs=[config] )
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def snake_case__ ( self : Optional[Any] )-> Tuple:
'''simple docstring'''
A__ = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,save_to_csv=lowercase_,sequence_lengths=[8],batch_sizes=[1],inference_time_csv_file=os.path.join(lowercase_,'inf_time.csv' ),train_memory_csv_file=os.path.join(lowercase_,'train_mem.csv' ),inference_memory_csv_file=os.path.join(lowercase_,'inf_mem.csv' ),train_time_csv_file=os.path.join(lowercase_,'train_time.csv' ),env_info_csv_file=os.path.join(lowercase_,'env.csv' ),multi_process=lowercase_,)
A__ = PyTorchBenchmark(lowercase_ )
benchmark.run()
self.assertTrue(Path(os.path.join(lowercase_,'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase_,'train_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase_,'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase_,'train_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase_,'env.csv' ) ).exists() )
def snake_case__ ( self : Tuple )-> str:
'''simple docstring'''
A__ = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(lowercase_ : Optional[Any] ):
self.assertTrue(hasattr(lowercase_,'sequential' ) )
self.assertTrue(hasattr(lowercase_,'cumulative' ) )
self.assertTrue(hasattr(lowercase_,'current' ) )
self.assertTrue(hasattr(lowercase_,'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],log_filename=os.path.join(lowercase_,'log.txt' ),log_print=lowercase_,trace_memory_line_by_line=lowercase_,multi_process=lowercase_,)
A__ = PyTorchBenchmark(lowercase_ )
A__ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(lowercase_,'log.txt' ) ).exists() )
| 282
| 1
|
def text_justification(sentence: str , max_width: int) -> list:
    """simple docstring"""
    words = sentence.split()
    def justify(line: list , width: int , max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line )
        if len(line ) == 1:
            # if there is only word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations ):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words ):
                # add the word
                aligned_words_list.append(line[i] )
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * ''' ''' )
            # just add the last word to the sentence
            aligned_words_list.append(line[-1] )
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list )
    answer = []
    line = []
    width = 0
    for word in words:
        if width + len(word ) + len(line ) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word )
            width += len(word )
        else:
            # justify the line and add it to result
            answer.append(justify(line , width , max_width ) )
            # reset new line and new width
            line , width = [word], len(word )
    remaining_spaces = max_width - width - len(line )
    answer.append(''' '''.join(line ) + (remaining_spaces + 1) * ''' ''' )
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 51
|
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency( inductance: float , capacitance: float ):
if inductance <= 0:
raise ValueError('Inductance cannot be 0 or negative' )
elif capacitance <= 0:
raise ValueError('Capacitance cannot be 0 or negative' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
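# Worked example (added sketch; component values are illustrative): a 10 mH
# inductor with a 100 nF capacitor resonates at 1 / (2*pi*sqrt(L*C)) ≈ 5033 Hz.
# print(resonant_frequency(inductance=10e-3, capacitance=100e-9))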
| 297
| 0
|
from __future__ import annotations
seive = [True] * 1_00_00_01
i = 2
while i * i <= 1_00_00_00:
    if seive[i]:
        for j in range(i * i, 1_00_00_01, i):
            seive[j] = False
    i += 1
def is_prime( n : int ) -> bool:
    '''simple docstring'''
    return seive[n]
def contains_an_even_digit( n : int ) -> bool:
    '''simple docstring'''
    return any(digit in """02468""" for digit in str(n ) )
def find_circular_primes( limit : int = 1000000 ) -> list[int]:
    '''simple docstring'''
    result = [2]  # result already includes the number 2.
    for num in range(3 , limit + 1 , 2 ):
        if is_prime(num ) and not contains_an_even_digit(num ):
            str_num = str(num )
            list_nums = [int(str_num[j:] + str_num[:j] ) for j in range(len(str_num ) )]
            if all(is_prime(i ) for i in list_nums ):
                result.append(num )
    return result
return result
def solution( ) -> int:
    '''simple docstring'''
    return len(find_circular_primes() )
if __name__ == "__main__":
print(f"""{len(find_circular_primes()) = }""")
| 354
|
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
def __init__( self , initial_capacity : int = 6 ) -> None:
    self.front: Node | None = None
    self.rear: Node | None = None
    self.create_linked_list(initial_capacity )
def create_linked_list( self , initial_capacity : int ) -> None:
    current_node = Node()
    self.front = current_node
    self.rear = current_node
    previous_node = current_node
    for _ in range(1 , initial_capacity ):
        current_node = Node()
        previous_node.next = current_node
        current_node.prev = previous_node
        previous_node = current_node
    # close the ring: last node points back to the front
    previous_node.next = self.front
    self.front.prev = previous_node
def is_empty( self ) -> bool:
    return (
        self.front == self.rear
        and self.front is not None
        and self.front.data is None
    )
def peek( self ):
    self.check_can_perform_operation()
    return self.front.data if self.front else None
def enqueue( self , data : Any ) -> None:
    if self.rear is None:
        return
    self.check_is_full()
    if not self.is_empty():
        self.rear = self.rear.next
    if self.rear:
        self.rear.data = data
def dequeue( self ):
    self.check_can_perform_operation()
    if self.rear is None or self.front is None:
        return None
    if self.front == self.rear:
        data = self.front.data
        self.front.data = None
        return data
    old_front = self.front
    self.front = old_front.next
    data = old_front.data
    old_front.data = None
    return data
def check_can_perform_operation( self ) -> None:
    if self.is_empty():
        raise Exception("""Empty Queue""" )
def check_is_full( self ) -> None:
    if self.rear and self.rear.next == self.front:
        raise Exception("""Full Queue""" )
class Node:
    def __init__( self ) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28
| 0
|
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
A : List[str] = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check( pkg , hint=None ):
    '''simple docstring'''
    require_version(deps[pkg] , hint )
| 57
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
pipeline_class = DDIMPipeline
params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
required_optional_params = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""latents""",
"""callback""",
"""callback_steps""",
}
batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
lowerCamelCase__ = False
def get_dummy_components( self ):
    torch.manual_seed(0 )
    unet = UNetaDModel(
        block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
    scheduler = DDIMScheduler()
    components = {'unet': unet, 'scheduler': scheduler}
return components
def get_dummy_inputs( self , device , seed=0 ):
    if str(device ).startswith('mps' ):
        generator = torch.manual_seed(seed )
    else:
        generator = torch.Generator(device=device ).manual_seed(seed )
    inputs = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def test_inference( self ):
    device = 'cpu'
    components = self.get_dummy_components()
    pipe = self.pipeline_class(**components )
    pipe.to(device )
    pipe.set_progress_bar_config(disable=None )
    inputs = self.get_dummy_inputs(device )
    image = pipe(**inputs ).images
    image_slice = image[0, -3:, -3:, -1]
    self.assertEqual(image.shape , (1, 32, 32, 3) )
    expected_slice = np.array(
        [1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04] )
    max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
    self.assertLessEqual(max_diff , 1E-3 )
def test_dict_tuple_outputs_equivalent( self ):
    super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def test_save_load_local( self ):
    super().test_save_load_local(expected_max_difference=3E-3 )
def test_save_load_optional_components( self ):
    super().test_save_load_optional_components(expected_max_difference=3E-3 )
def test_inference_batch_single_identical( self ):
    super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests( unittest.TestCase ):
'''simple docstring'''
def test_inference_cifar10( self ):
    model_id = 'google/ddpm-cifar10-32'
    unet = UNetaDModel.from_pretrained(model_id )
    scheduler = DDIMScheduler()
    ddim = DDIMPipeline(unet=unet , scheduler=scheduler )
    ddim.to(torch_device )
    ddim.set_progress_bar_config(disable=None )
    generator = torch.manual_seed(0 )
    image = ddim(generator=generator , eta=0.0 , output_type='numpy' ).images
    image_slice = image[0, -3:, -3:, -1]
    assert image.shape == (1, 32, 32, 3)
    expected_slice = np.array([0.17_23, 0.16_17, 0.16_00, 0.16_26, 0.14_97, 0.15_13, 0.15_05, 0.14_42, 0.14_53] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def test_inference_ema_bedroom( self ):
    model_id = 'google/ddpm-ema-bedroom-256'
    unet = UNetaDModel.from_pretrained(model_id )
    scheduler = DDIMScheduler.from_pretrained(model_id )
    ddpm = DDIMPipeline(unet=unet , scheduler=scheduler )
    ddpm.to(torch_device )
    ddpm.set_progress_bar_config(disable=None )
    generator = torch.manual_seed(0 )
    image = ddpm(generator=generator , output_type='numpy' ).images
    image_slice = image[0, -3:, -3:, -1]
    assert image.shape == (1, 256, 256, 3)
    expected_slice = np.array([0.00_60, 0.02_01, 0.03_44, 0.00_24, 0.00_18, 0.00_02, 0.00_22, 0.00_00, 0.00_69] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 96
| 0
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class PixaStructProcessor( ProcessorMixin ):
attributes = ["image_processor", "tokenizer"]
image_processor_class = "Pix2StructImageProcessor"
tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")
def __init__( self , image_processor , tokenizer ):
    tokenizer.return_token_type_ids = False
    super().__init__(image_processor , tokenizer )
def __call__( self , images=None , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , max_patches: Optional[int] = 2048 , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_token_type_ids: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None and not self.image_processor.is_vqa:
self.current_processor = self.tokenizer
text_encoding = self.tokenizer(
    text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
encoding_image_processor = self.image_processor(
    images , return_tensors=return_tensors , max_patches=max_patches , **kwargs )
else:
# add pixel_values and bbox
encoding_image_processor = self.image_processor(
    images , return_tensors=return_tensors , max_patches=max_patches , header_text=text , **kwargs )
if text is not None and not self.image_processor.is_vqa:
text_encoding = self.tokenizer(
    text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
if "attention_mask" in text_encoding:
lowerCAmelCase : Optional[Any] = text_encoding.pop('attention_mask' )
if "input_ids" in text_encoding:
lowerCAmelCase : str = text_encoding.pop('input_ids' )
else:
lowerCAmelCase : List[str] = None
if text_encoding is not None:
    encoding_image_processor.update(text_encoding )
return encoding_image_processor
def batch_decode( self , *args , **kwargs ):
    return self.tokenizer.batch_decode(*args , **kwargs )
def decode( self , *args , **kwargs ):
    return self.tokenizer.decode(*args , **kwargs )
@property
def model_input_names( self ):
    tokenizer_input_names = self.tokenizer.model_input_names
    image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 323
|
def hamming_distance( string1 : str , string2 : str ) -> int:
    '''simple docstring'''
    if len(string1 ) != len(string2 ):
        raise ValueError('String lengths must match!' )
    count = 0
    for char1, char2 in zip(string1 , string2 ):
        if char1 != char2:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
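# Added example: "karolin" and "kathrin" differ at exactly 3 positions.
assert hamming_distance("karolin", "kathrin") == 3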
| 323
| 1
|
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests( unittest.TestCase ):
"""simple docstring"""
@property
def gpu_provider( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def gpu_options( self ):
    options = ort.SessionOptions()
    options.enable_mem_pattern = False
return options
def test_inference_default_pndm( self ):
    init_image = load_image(
        """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
        """/in_paint/overture-creations-5sI6fQgYIuo.png""" )
    mask_image = load_image(
        """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
        """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
    expected_image = load_numpy(
        """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
        """/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" )
    # using the PNDM scheduler by default
    pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
        """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
    pipe.set_progress_bar_config(disable=None )
    prompt = """A red cat sitting on a park bench"""
    generator = np.random.RandomState(0 )
    output = pipe(
        prompt=prompt , image=init_image , mask_image=mask_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=generator , output_type="""np""" , )
    image = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-2
| 114
|
def kth_permutation( k , n ):
    factorials = [1]
    for i in range(2 , n ):
        factorials.append(factorials[-1] * i )
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    permutation = []
    elements = list(range(n ) )
    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number , k = divmod(k , factorial )
        permutation.append(elements[number] )
        elements.remove(elements[number] )
    permutation.append(elements[0] )
    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
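# Added check: for n = 3 the 3! = 6 permutations are indexed k = 0..5 in
# lexicographic order, so k = 5 selects the last one.
assert kth_permutation(5, 3) == [2, 1, 0]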
| 114
| 1
|
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
TPosition = tuple[int, int]
class Node:
def __init__( self , pos_x , pos_y , goal_x , goal_y , g_cost , parent ) -> None:
    self.pos_x = pos_x
    self.pos_y = pos_y
    self.pos = (pos_y, pos_x)
    self.goal_x = goal_x
    self.goal_y = goal_y
    self.g_cost = g_cost
    self.parent = parent
    self.h_cost = self.calculate_heuristic()
    self.f_cost = self.g_cost + self.h_cost
def calculate_heuristic( self ) -> float:
    dx = self.pos_x - self.goal_x
    dy = self.pos_y - self.goal_y
    if HEURISTIC == 1:
        return abs(dx ) + abs(dy )
    else:
        return sqrt(dy**2 + dx**2 )
def __lt__( self , other ) -> bool:
    return self.f_cost < other.f_cost
class AStar:
def __init__( self , start , goal ) -> None:
    self.start = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , None )
    self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99_999 , None )
    self.open_nodes = [self.start]
    self.closed_nodes = []
    self.reached = False
def search( self ) -> list[TPosition]:
    while self.open_nodes:
        # Open Nodes are sorted using __lt__
        self.open_nodes.sort()
        current_node = self.open_nodes.pop(0 )
        if current_node.pos == self.target.pos:
            return self.retrace_path(current_node )
        self.closed_nodes.append(current_node )
        successors = self.get_successors(current_node )
        for child_node in successors:
            if child_node in self.closed_nodes:
                continue
            if child_node not in self.open_nodes:
                self.open_nodes.append(child_node )
            else:
                # retrieve the best current path
                better_node = self.open_nodes.pop(self.open_nodes.index(child_node ) )
                if child_node.g_cost < better_node.g_cost:
                    self.open_nodes.append(child_node )
                else:
                    self.open_nodes.append(better_node )
    return [self.start.pos]
def get_successors( self , parent ) -> list[Node]:
    successors = []
    for action in delta:
        pos_x = parent.pos_x + action[1]
        pos_y = parent.pos_y + action[0]
        if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
            continue
        if grid[pos_y][pos_x] != 0:
            continue
        successors.append(
            Node(
                pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , parent , ) )
    return successors
def retrace_path( self , node ) -> list[TPosition]:
    current_node = node
    path = []
    while current_node is not None:
        path.append((current_node.pos_y, current_node.pos_x) )
        current_node = current_node.parent
path.reverse()
return path
class BidirectionalAStar:
def __init__( self , start , goal ) -> None:
    self.fwd_astar = AStar(start , goal )
    self.bwd_astar = AStar(goal , start )
    self.reached = False
def search( self ) -> list[TPosition]:
    while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
        self.fwd_astar.open_nodes.sort()
        self.bwd_astar.open_nodes.sort()
        current_fwd_node = self.fwd_astar.open_nodes.pop(0 )
        current_bwd_node = self.bwd_astar.open_nodes.pop(0 )
        if current_bwd_node.pos == current_fwd_node.pos:
            return self.retrace_bidirectional_path(
                current_fwd_node , current_bwd_node )
        self.fwd_astar.closed_nodes.append(current_fwd_node )
        self.bwd_astar.closed_nodes.append(current_bwd_node )
        self.fwd_astar.target = current_bwd_node
        self.bwd_astar.target = current_fwd_node
        successors = {
            self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node ),
            self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node ),
        }
        for astar in [self.fwd_astar, self.bwd_astar]:
            for child_node in successors[astar]:
                if child_node in astar.closed_nodes:
                    continue
                if child_node not in astar.open_nodes:
                    astar.open_nodes.append(child_node )
                else:
                    # retrieve the best current path
                    better_node = astar.open_nodes.pop(
                        astar.open_nodes.index(child_node ) )
                    if child_node.g_cost < better_node.g_cost:
                        astar.open_nodes.append(child_node )
                    else:
                        astar.open_nodes.append(better_node )
    return [self.fwd_astar.start.pos]
def retrace_bidirectional_path( self , fwd_node , bwd_node ) -> list[TPosition]:
    fwd_path = self.fwd_astar.retrace_path(fwd_node )
    bwd_path = self.bwd_astar.retrace_path(bwd_node )
    bwd_path.pop()
    bwd_path.reverse()
    path = fwd_path + bwd_path
    return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
init = (0, 0)
goal = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
start_time = time.time()
a_star = AStar(init, goal)
path = a_star.search()
end_time = time.time() - start_time
print(F'AStar execution time = {end_time:f} seconds')
bd_start_time = time.time()
bidir_astar = BidirectionalAStar(init, goal)
bd_path = bidir_astar.search()
bd_end_time = time.time() - bd_start_time
print(F'BidirectionalAStar execution time = {bd_end_time:f} seconds')
| 352
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_whisper_fast'] = ['''WhisperTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_whisper'] = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_whisper'] = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_flax_whisper'] = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
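# Added usage note (sketch): with the _LazyModule registered above, a statement
# such as ``from transformers import WhisperConfig`` resolves through the import
# structure and only loads the heavy submodule on first attribute access.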
| 342
| 0
|
import random
class Onepad:
    '''simple docstring'''
    @staticmethod
    def encrypt( text : str ):
        '''Encrypt ``text`` into a cipher list and the matching random key list.'''
        plain = [ord(i ) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1 , 300 )
            c = (i + k) * k
            cipher.append(c )
            key.append(k )
        return cipher, key
    @staticmethod
    def decrypt( cipher : list[int] , key : list[int] ):
        '''Invert ``encrypt``: each code point is recovered as (c - k * k) / k.'''
        plain = []
        for i in range(len(key ) ):
            p = int((cipher[i] - (key[i]) ** 2) / key[i] )
            plain.append(chr(p ) )
        return "".join(plain )
if __name__ == "__main__":
c , k = Onepad().encrypt('''Hello''')
print(c, k)
print(Onepad().decrypt(c, k))
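# Round-trip sketch (added illustration): the key stream is random, but since
# each cipher value is (i + k) * k, computing (c - k * k) / k recovers i exactly.
c2, k2 = Onepad().encrypt("attack at dawn")
assert Onepad().decrypt(c2, k2) == "attack at dawn"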
| 282
|
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@slow
def test_flax_xlm_roberta_base( self ):
    '''simple docstring'''
    model = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
    tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base' )
    text = 'The dog is cute and lives in the garden house'
    input_ids = jnp.array([tokenizer.encode(text )] )
    expected_output_shape = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
    expected_output_values_last_dim = jnp.array(
        [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
    output = model(input_ids )['last_hidden_state']
    self.assertEqual(output.shape , expected_output_shape )
    # compare the actual values for a slice of last dim
    self.assertTrue(jnp.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1E-3 ) )
| 282
| 1
|
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
UpperCamelCase : str = {
"""susnato/ernie-m-base_pytorch""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json""",
"""susnato/ernie-m-large_pytorch""": """https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json""",
}
class ErnieMConfig( PretrainedConfig ):
"""simple docstring"""
model_type = "ernie_m"
attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self : str , UpperCAmelCase_ : int = 2_5_0_0_0_2 , UpperCAmelCase_ : int = 7_6_8 , UpperCAmelCase_ : int = 1_2 , UpperCAmelCase_ : int = 1_2 , UpperCAmelCase_ : int = 3_0_7_2 , UpperCAmelCase_ : str = "gelu" , UpperCAmelCase_ : float = 0.1 , UpperCAmelCase_ : float = 0.1 , UpperCAmelCase_ : int = 5_1_4 , UpperCAmelCase_ : float = 0.02 , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : float = 1e-05 , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[str]=0.0 , **UpperCAmelCase_ : Any , ):
"""simple docstring"""
super().__init__(pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.classifier_dropout = classifier_dropout
self.is_decoder = is_decoder
self.act_dropout = act_dropout
| 345
|
'''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer( CLIPTokenizer ):
"""simple docstring"""
def __init__( self , *args , **kwargs ):
    """simple docstring"""
    super().__init__(*args , **kwargs )
    self.token_map = {}
def try_adding_tokens( self , placeholder_token , *args , **kwargs ):
    """simple docstring"""
    num_added_tokens = super().add_tokens(placeholder_token , *args , **kwargs )
if num_added_tokens == 0:
raise ValueError(
f"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
' `placeholder_token` that is not already in the tokenizer.')
def add_placeholder_tokens( self , placeholder_token , *args , num_vec_per_token=1 , **kwargs ):
    """simple docstring"""
    output = []
    if num_vec_per_token == 1:
        self.try_adding_tokens(placeholder_token , *args , **kwargs )
        output.append(placeholder_token )
    else:
        output = []
        for i in range(num_vec_per_token ):
            ith_token = placeholder_token + f"""_{i}"""
            self.try_adding_tokens(ith_token , *args , **kwargs )
            output.append(ith_token )
    # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
    for token in self.token_map:
        if token in placeholder_token:
            raise ValueError(
                f"""The tokenizer already has placeholder token {token} that can get confused with"""
                f""" {placeholder_token}; keep placeholder tokens independent""" )
    self.token_map[placeholder_token] = output
def replace_placeholder_tokens_in_text( self , text , vector_shuffle=False , prop_tokens_to_load=1.0 ):
    """simple docstring"""
    if isinstance(text , list ):
        output = []
        for i in range(len(text ) ):
            output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=vector_shuffle ) )
        return output
    for placeholder_token in self.token_map:
        if placeholder_token in text:
            tokens = self.token_map[placeholder_token]
            tokens = tokens[: 1 + int(len(tokens ) * prop_tokens_to_load )]
            if vector_shuffle:
                tokens = copy.copy(tokens )
                random.shuffle(tokens )
            text = text.replace(placeholder_token , ' '.join(tokens ) )
    return text
def __call__( self : Optional[int] , UpperCAmelCase_ : Any , *UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Optional[int]=1.0 , **UpperCAmelCase_ : str):
"""simple docstring"""
return super().__call__(
self.replace_placeholder_tokens_in_text(
UpperCAmelCase_ , vector_shuffle=UpperCAmelCase_ , prop_tokens_to_load=UpperCAmelCase_) , *UpperCAmelCase_ , **UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[int] , *UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Optional[Any]=1.0 , **UpperCAmelCase_ : Dict):
"""simple docstring"""
return super().encode(
self.replace_placeholder_tokens_in_text(
UpperCAmelCase_ , vector_shuffle=UpperCAmelCase_ , prop_tokens_to_load=UpperCAmelCase_) , *UpperCAmelCase_ , **UpperCAmelCase_ , )
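# --- usage sketch (added): "openai/clip-vit-base-patch32" is an illustrative
# checkpoint, not one referenced by this file.
#
#   tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
#   tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
#   # "<cat-toy>" now expands to "<cat-toy>_0 ... <cat-toy>_3" before encoding:
#   ids = tokenizer("a photo of <cat-toy>", vector_shuffle=True)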
| 345
| 1
|
from __future__ import annotations
import math
class SegmentTree:
    """Max segment tree with lazy propagation for range-assignment updates."""

    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign val to every position in [a, b], pushing pending updates down lazily."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum over [a, b]."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
| 26
|
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{"""feature-extraction""": ConvNextModel, """image-classification""": ConvNextForImageClassification}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)
def A ( self : List[str] ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A ( self : Optional[int] ):
"""simple docstring"""
return
@unittest.skip(reason='ConvNext does not use inputs_embeds' )
def A ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip(reason='ConvNext does not support input and output embeddings' )
def A ( self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip(reason='ConvNext does not use feedforward chunking' )
def A ( self : Optional[int] ):
"""simple docstring"""
pass
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(UpperCamelCase__ )
UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCamelCase__ )
def A ( self : Optional[Any] ):
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple ):
UpperCamelCase = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
UpperCamelCase = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCamelCase = self.model_tester.num_stages
self.assertEqual(len(UpperCamelCase__ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@slow
def A ( self : Dict ):
"""simple docstring"""
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = ConvNextModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def __lowerCamelCase ( ) -> Any:
"""simple docstring"""
UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
def A ( self : Optional[Any] ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224' ) if is_vision_available() else None
@slow
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224' ).to(UpperCamelCase__ )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=UpperCamelCase__ , return_tensors='pt' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**UpperCamelCase__ )
# verify the logits
UpperCamelCase = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
UpperCamelCase = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 ) )
@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
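    # --- usage sketch (added): how the backbone under test is driven outside
    # the harness; "facebook/convnext-tiny-224" is the checkpoint the
    # integration test above already loads.
    #
    #   from transformers import ConvNextBackbone
    #   backbone = ConvNextBackbone.from_pretrained(
    #       "facebook/convnext-tiny-224", out_features=["stage2", "stage4"]
    #   )
    #   feature_maps = backbone(pixel_values).feature_maps  # one tensor per stage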
| 28
| 0
|
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """LSD radix sort for non-negative integers, sorting the list in place.

    >>> radix_sort([170, 45, 75, 90, 802, 24, 2, 66])
    [2, 24, 45, 66, 75, 90, 170, 802]
    """
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next digit
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
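# --- worked example (added): with placement == 1 the loop above buckets by the
# least-significant digit; for [170, 45, 75, 90] that pass fills buckets[0]
# with [170, 90] and buckets[5] with [45, 75], so the list becomes
# [170, 90, 45, 75] before the tens pass.
#
#   >>> radix_sort([170, 45, 75, 90, 802, 24, 2, 66])
#   [2, 24, 45, 66, 75, 90, 170, 802]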
| 277
|
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    """Pipeline for audio generation via diffusion on mel spectrograms, optionally in a VQ-VAE latent space."""

    _optional_components = ["vqvae"]
def __init__( self : List[Any] , __UpperCAmelCase : AutoencoderKL , __UpperCAmelCase : UNetaDConditionModel , __UpperCAmelCase : Mel , __UpperCAmelCase : Union[DDIMScheduler, DDPMScheduler] , ) -> str:
super().__init__()
self.register_modules(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase , mel=__UpperCAmelCase , vqvae=__UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1_000
@torch.no_grad()
def __call__( self : List[Any] , __UpperCAmelCase : int = 1 , __UpperCAmelCase : str = None , __UpperCAmelCase : np.ndarray = None , __UpperCAmelCase : int = 0 , __UpperCAmelCase : int = 0 , __UpperCAmelCase : int = None , __UpperCAmelCase : torch.Generator = None , __UpperCAmelCase : float = 0 , __UpperCAmelCase : float = 0 , __UpperCAmelCase : torch.Generator = None , __UpperCAmelCase : float = 0 , __UpperCAmelCase : torch.Tensor = None , __UpperCAmelCase : torch.Tensor = None , __UpperCAmelCase : Union[str, Any]=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
UpperCAmelCase_= steps or self.get_default_steps()
self.scheduler.set_timesteps(__UpperCAmelCase )
UpperCAmelCase_= step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
UpperCAmelCase_= (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
UpperCAmelCase_= randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__UpperCAmelCase , device=self.device , )
UpperCAmelCase_= noise
UpperCAmelCase_= None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase_= self.mel.audio_slice_to_image(__UpperCAmelCase )
UpperCAmelCase_= np.frombuffer(input_image.tobytes() , dtype="""uint8""" ).reshape(
(input_image.height, input_image.width) )
UpperCAmelCase_= (input_image / 255) * 2 - 1
UpperCAmelCase_= torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
UpperCAmelCase_= self.vqvae.encode(torch.unsqueeze(__UpperCAmelCase , 0 ) ).latent_dist.sample(
generator=__UpperCAmelCase )[0]
UpperCAmelCase_= self.vqvae.config.scaling_factor * input_images
if start_step > 0:
UpperCAmelCase_= self.scheduler.add_noise(__UpperCAmelCase , __UpperCAmelCase , self.scheduler.timesteps[start_step - 1] )
UpperCAmelCase_= (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
UpperCAmelCase_= int(mask_start_secs * pixels_per_second )
UpperCAmelCase_= int(mask_end_secs * pixels_per_second )
UpperCAmelCase_= self.scheduler.add_noise(__UpperCAmelCase , __UpperCAmelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
            if isinstance(self.unet, UNetaDConditionModel):
UpperCAmelCase_= self.unet(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )["""sample"""]
else:
UpperCAmelCase_= self.unet(__UpperCAmelCase , __UpperCAmelCase )["""sample"""]
            if isinstance(self.scheduler, DDIMScheduler):
UpperCAmelCase_= self.scheduler.step(
model_output=__UpperCAmelCase , timestep=__UpperCAmelCase , sample=__UpperCAmelCase , eta=__UpperCAmelCase , generator=__UpperCAmelCase , )["""prev_sample"""]
else:
UpperCAmelCase_= self.scheduler.step(
model_output=__UpperCAmelCase , timestep=__UpperCAmelCase , sample=__UpperCAmelCase , generator=__UpperCAmelCase , )["""prev_sample"""]
if mask is not None:
if mask_start > 0:
UpperCAmelCase_= mask[:, step, :, :mask_start]
if mask_end > 0:
UpperCAmelCase_= mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
UpperCAmelCase_= 1 / self.vqvae.config.scaling_factor * images
UpperCAmelCase_= self.vqvae.decode(__UpperCAmelCase )["""sample"""]
UpperCAmelCase_= (images / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase_= images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
UpperCAmelCase_= (images * 255).round().astype("""uint8""" )
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )
UpperCAmelCase_= [self.mel.image_to_audio(__UpperCAmelCase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__UpperCAmelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(__UpperCAmelCase ) )
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( self : List[str] , __UpperCAmelCase : List[Image.Image] , __UpperCAmelCase : int = 50 ) -> np.ndarray:
        assert isinstance(self.scheduler, DDIMScheduler)
self.scheduler.set_timesteps(__UpperCAmelCase )
UpperCAmelCase_= np.array(
[np.frombuffer(image.tobytes() , dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] )
UpperCAmelCase_= (sample / 255) * 2 - 1
UpperCAmelCase_= torch.Tensor(__UpperCAmelCase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
UpperCAmelCase_= t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
UpperCAmelCase_= self.scheduler.alphas_cumprod[t]
UpperCAmelCase_= (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
UpperCAmelCase_= 1 - alpha_prod_t
UpperCAmelCase_= self.unet(__UpperCAmelCase , __UpperCAmelCase )["""sample"""]
UpperCAmelCase_= (1 - alpha_prod_t_prev) ** 0.5 * model_output
UpperCAmelCase_= (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
UpperCAmelCase_= sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def _SCREAMING_SNAKE_CASE ( __UpperCAmelCase : torch.Tensor , __UpperCAmelCase : torch.Tensor , __UpperCAmelCase : float ) -> torch.Tensor:
UpperCAmelCase_= acos(torch.dot(torch.flatten(__UpperCAmelCase ) , torch.flatten(__UpperCAmelCase ) ) / torch.norm(__UpperCAmelCase ) / torch.norm(__UpperCAmelCase ) )
return sin((1 - alpha) * theta ) * xa / sin(__UpperCAmelCase ) + sin(alpha * theta ) * xa / sin(__UpperCAmelCase )
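# --- usage sketch (added): end-to-end generation with the pipeline above. The
# checkpoint name is an assumption for illustration only, not taken from this
# file.
#
#   pipe = AudioDiffusionPipeline.from_pretrained("teticio/audio-diffusion-256")
#   output = pipe(batch_size=1)
#   image = output.images[0]                 # mel spectrogram as a PIL image
#   sample_rate = pipe.mel.get_sample_rate()
#   audio = output.audios[0]                 # reconstructed waveform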
| 277
| 1
|
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    """Wraps a Pix2StructImageProcessor and a T5 tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images=None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_patches: Optional[int] = 2048,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs)
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs)

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
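# --- usage sketch (added): "google/pix2struct-textcaps-base" is an assumed
# checkpoint for illustration.
#
#   from PIL import Image
#   processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
#   inputs = processor(images=Image.open("demo.png"), return_tensors="pt", max_patches=1024)
#   # -> keys like "flattened_patches" and "attention_mask" from the image processor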
| 323
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
__UpperCAmelCase = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
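# --- note (added): with the _LazyModule pattern above, importing this package
# is cheap; each submodule is only imported the first time one of its exported
# names is touched. Assuming this file is transformers/onnx/__init__.py:
#
#   from transformers.onnx import OnnxConfig   # triggers the real import of .config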
| 323
| 1
|
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save per-example lengths so that batch-by-length sampling can skip re-tokenizing."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
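# --- usage sketch (added): fire.Fire exposes save_len_file as a command-line
# tool whose flags mirror the function parameters; the model name and data
# path below are placeholders.
#
#   python save_len_file.py t5-small /path/to/data_dir --consider_target False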
| 120
|
from string import ascii_uppercase

dicta = {char: i for i, char in enumerate(ascii_uppercase)}
dictb = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Cycle through the key until it is as long as the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt by subtracting the key letter from each message letter (mod 26)."""
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dicta[letter] - dicta[key_new[i]]) % 26
            i += 1
            cipher_text += dictb[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    """Decrypt by adding the key letter back to each cipher letter (mod 26)."""
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dicta[letter] + dicta[key_new[i]] + 26) % 26
            i += 1
            or_txt += dictb[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
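# --- worked example (added, computed by hand from the tables above): with
# message "THE GERMAN ATTACK" and key "SECRET", generate_key pads the key to
# "SECRETSECRETSECRE" (same length as the message); the subtraction rule in
# cipher_text then yields "BDC PAYUWL JPAIYI", and original_text inverts it
# back to the plaintext.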
| 120
| 1
|
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self: Any , UpperCamelCase: Optional[int] , UpperCamelCase: str , UpperCamelCase: List[str] , UpperCamelCase: Tuple , UpperCamelCase: int , UpperCamelCase: List[str] , UpperCamelCase: str ):
"""simple docstring"""
A__ = TFConvBertModel(config=UpperCamelCase )
A__ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
A__ = [input_ids, input_mask]
A__ = model(UpperCamelCase )
A__ = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self: List[Any] , UpperCamelCase: Tuple , UpperCamelCase: Any , UpperCamelCase: int , UpperCamelCase: Optional[Any] , UpperCamelCase: str , UpperCamelCase: Dict , UpperCamelCase: Any ):
"""simple docstring"""
A__ = TFConvBertForMaskedLM(config=UpperCamelCase )
A__ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
A__ = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self: List[Any] , UpperCamelCase: Any , UpperCamelCase: str , UpperCamelCase: Union[str, Any] , UpperCamelCase: Optional[int] , UpperCamelCase: Optional[Any] , UpperCamelCase: List[Any] , UpperCamelCase: List[str] ):
"""simple docstring"""
A__ = self.num_labels
A__ = TFConvBertForSequenceClassification(config=UpperCamelCase )
A__ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
A__ = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self: Optional[int] , UpperCamelCase: Optional[Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Tuple , UpperCamelCase: Tuple , UpperCamelCase: Optional[int] , UpperCamelCase: Optional[Any] ):
"""simple docstring"""
A__ = self.num_choices
A__ = TFConvBertForMultipleChoice(config=UpperCamelCase )
A__ = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
A__ = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
A__ = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
A__ = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
A__ = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase ( self: List[str] , UpperCamelCase: Dict , UpperCamelCase: int , UpperCamelCase: Optional[int] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: Any ):
"""simple docstring"""
A__ = self.num_labels
A__ = TFConvBertForTokenClassification(config=UpperCamelCase )
A__ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
A__ = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: str , UpperCamelCase: Optional[Any] , UpperCamelCase: List[str] , UpperCamelCase: List[Any] , UpperCamelCase: Dict , UpperCamelCase: str , UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
A__ = TFConvBertForQuestionAnswering(config=UpperCamelCase )
A__ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
A__ = model(UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFConvBertModel,
"fill-mask": TFConvBertForMaskedLM,
"question-answering": TFConvBertForQuestionAnswering,
"text-classification": TFConvBertForSequenceClassification,
"token-classification": TFConvBertForTokenClassification,
"zero-shot": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase )
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase )
@slow
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
A__ = True
if hasattr(UpperCamelCase , """use_cache""" ):
A__ = True
A__ = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
A__ = getattr(self.model_tester , """key_length""" , UpperCamelCase )
for model_class in self.all_model_classes:
A__ = self._prepare_for_class(UpperCamelCase , UpperCamelCase )
A__ = model_class(UpperCamelCase )
A__ = len(model(UpperCamelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase , saved_model=UpperCamelCase )
A__ = os.path.join(UpperCamelCase , """saved_model""" , """1""" )
A__ = tf.keras.models.load_model(UpperCamelCase )
A__ = model(UpperCamelCase )
if self.is_encoder_decoder:
A__ = outputs["""encoder_hidden_states"""]
A__ = outputs["""encoder_attentions"""]
else:
A__ = outputs["""hidden_states"""]
A__ = outputs["""attentions"""]
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
A__ = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
self.assertIsNotNone(UpperCamelCase )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
A__ = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
A__ = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
A__ = getattr(self.model_tester , """key_length""" , UpperCamelCase )
A__ = getattr(self.model_tester , """key_length""" , UpperCamelCase )
def check_decoder_attentions_output(UpperCamelCase: Union[str, Any] ):
A__ = len(UpperCamelCase )
self.assertEqual(out_len % 2 , 0 )
A__ = outputs.decoder_attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(UpperCamelCase: Tuple ):
A__ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
A__ = True
A__ = False
A__ = model_class(UpperCamelCase )
A__ = model(self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
A__ = len(UpperCamelCase )
self.assertEqual(config.output_hidden_states , UpperCamelCase )
check_encoder_attentions_output(UpperCamelCase )
if self.is_encoder_decoder:
A__ = model_class(UpperCamelCase )
A__ = model(self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
self.assertEqual(config.output_hidden_states , UpperCamelCase )
check_decoder_attentions_output(UpperCamelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
A__ = True
A__ = model_class(UpperCamelCase )
A__ = model(self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
self.assertEqual(config.output_hidden_states , UpperCamelCase )
check_encoder_attentions_output(UpperCamelCase )
# Check attention is always last and order is fine
A__ = True
A__ = True
A__ = model_class(UpperCamelCase )
A__ = model(self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCamelCase ) )
self.assertEqual(model.config.output_hidden_states , UpperCamelCase )
check_encoder_attentions_output(UpperCamelCase )
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@slow
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
A__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
A__ = model(UpperCamelCase )[0]
A__ = [1, 6, 7_68]
self.assertEqual(output.shape , UpperCamelCase )
A__ = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCamelCase , atol=1e-4 )
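# --- note (added): these tests are normally driven through pytest; the path
# below assumes the standard transformers repository layout, and RUN_SLOW=1
# enables the @slow-decorated checkpoint tests.
#
#   RUN_SLOW=1 python -m pytest tests/models/convbert/test_modeling_tf_convbert.py -k "model"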
| 335
|
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by default, set size so the shortest edge resizes to 18
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected (height, width) after YolosImageProcessor resizing."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
"""simple docstring"""
    image_processing_class = YolosImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(UpperCamelCase , """image_std""" ) )
self.assertTrue(hasattr(UpperCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(UpperCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(UpperCamelCase , """size""" ) )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 13_33} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
A__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
def UpperCamelCase ( self: str ):
"""simple docstring"""
pass
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
A__ = self.image_processing_class(do_resize=UpperCamelCase , do_normalize=UpperCamelCase , do_rescale=UpperCamelCase )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
A__ = image_processing_a.pad(UpperCamelCase , return_tensors="""pt""" )
A__ = image_processing_a(UpperCamelCase , return_tensors="""pt""" )
self.assertTrue(
torch.allclose(encoded_images_with_method["""pixel_values"""] , encoded_images["""pixel_values"""] , atol=1e-4 ) )
@slow
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
A__ = json.loads(f.read() )
A__ = {"""image_id""": 3_97_69, """annotations""": target}
# encode them
A__ = YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" )
A__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
A__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
A__ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase )
A__ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
A__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase ) )
# verify class_labels
A__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase ) )
# verify orig_size
A__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase ) )
# verify size
A__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase ) )
@slow
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
A__ = json.loads(f.read() )
A__ = {"""file_name""": """000000039769.png""", """image_id""": 3_97_69, """segments_info""": target}
A__ = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
A__ = YolosImageProcessor(format="""coco_panoptic""" )
A__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , masks_path=UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
A__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
A__ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
A__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase ) )
# verify class_labels
A__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase ) )
# verify masks
A__ = 82_28_73
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , UpperCamelCase )
# verify orig_size
A__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase ) )
# verify size
A__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase ) )
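    # --- usage sketch (added): "hustvl/yolos-small" is the checkpoint the slow
    # test above loads.
    #
    #   image_processor = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
    #   encoding = image_processor(images=Image.open("img.png"), return_tensors="pt")
    #   encoding["pixel_values"].shape  # (1, 3, H, W) after shortest-edge resizing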
| 335
| 1
|
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention_forwardGenerator_pass = False

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05,
                num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000,
            clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32,
                intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5,
                pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size, layers_per_block=1,
            upcast_attention=True, use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012,
            prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )
        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turtle", generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle", prior_num_inference_steps=2, num_inference_steps=2, output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 369
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
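
# Illustrative usage sketch (not part of the upstream module; the output_dir
# value below is a placeholder): shows how the generation-specific fields above
# interact with `to_dict`.
if __name__ == "__main__":
    args = Seq2SeqTrainingArguments(
        output_dir="tmp_out",
        predict_with_generate=True,
        generation_max_length=128,
        generation_num_beams=4,
    )
    # `to_dict` serializes any GenerationConfig value, so the result stays JSON-friendly.
    print(args.to_dict()["generation_num_beams"])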
| 271
| 0
|
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
    "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}


class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
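
# Illustrative usage sketch (not part of the upstream module): the
# `attribute_map` above aliases `dropout` -> `classifier_dropout` and
# `num_classes` -> `num_labels` on config instances.
if __name__ == "__main__":
    config = ErnieMConfig(hidden_size=64, num_hidden_layers=2, classifier_dropout=0.3)
    assert config.dropout == config.classifier_dropout == 0.3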
| 345
| 1
|
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
| 352
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
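
# Illustrative usage sketch (not part of the upstream module; the checkpoint id
# and image URL are examples): nested text queries are padded per batch sample
# inside `__call__` above.
if __name__ == "__main__":
    import requests
    from PIL import Image

    processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
    inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
    print(inputs["input_ids"].shape, inputs["pixel_values"].shape)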
| 134
| 0
|
class Node:
    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None


def create_linked_list() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
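
# Illustrative usage sketch (not in the original file), exercising the API above:
#
#     linked_list = LinkedList()
#     for value in (1, 2, 3):
#         linked_list.insert(value)          # appends at the tail
#     linked_list.insert_at_position(position=2, value=9)
#     assert str(linked_list) == "1 9 2 3"
#     linked_list.delete_value(9)
#     assert list(linked_list) == [1, 2, 3]  # iteration goes through LinkedListIterator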
| 277
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
| 277
| 1
|
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'''The `image_to_image.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionImg2ImgPipeline` instead.'''
)
| 355
|
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists.\n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
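
# Quick sanity sketch (not in the original file; uses a deliberately small key
# size so it finishes quickly):
#
#     public_key, private_key = generate_key(16)
#     key_size, e_1, e_2, p = public_key
#     # e_2 is the modular inverse of e_1**d mod p, so the product is 1 mod p.
#     assert (e_2 * pow(e_1, private_key[1], p)) % p == 1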
| 107
| 0
|
'''simple docstring'''
def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()

    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
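
# Example behaviour (illustrative, not from the original file):
#
#     >>> is_isogram("Uncopyrightable")
#     True
#     >>> is_isogram("allowance")
#     False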
| 120
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
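
# Illustrative usage sketch (not part of the upstream module): PipelineTool
# instances are callable; the call runs `encode` -> `forward` -> `decode` from above.
if __name__ == "__main__":
    summarizer = TextSummarizationTool()
    print(summarizer("Put any long English text here and the tool returns a short summary."))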
| 120
| 1
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1008)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1008)
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
_A: Optional[int] = XGLMTokenizer(lowerCAmelCase_ , keep_accents=lowerCAmelCase_ )
_A: Optional[Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCAmelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
_A: str = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_A: Dict = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
_A: Optional[int] = tokenizer.convert_ids_to_tokens(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")

    def test_picklable_without_rust(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31227, 4447, 35]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
_A: Dict = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'''
)
# fmt: off
_A: List[str] = [2, 1_0_1_8, 6_7, 1_1, 1_9_8_8, 2_6_1_7, 5_6_3_1, 2_7_8, 1_1, 3_4_0_7, 4_8, 7_1_6_3_0, 2_8_0_8_5, 4, 3_2_3_4, 1_5_7, 1_3, 6, 5, 6, 4, 3_5_2_6, 7_6_8, 1_5, 6_5_9, 5_7, 2_9_8, 3_9_8_3, 8_6_4, 1_2_9, 2_1, 6, 5, 1_3_6_7_5, 3_7_7, 6_5_2, 7_5_8_0, 1_0_3_4_1, 1_5_5, 2_8_1_7, 4_2_2, 1_6_6_6, 7, 1_6_7_4, 5_3, 1_1_3, 2_0_2_2_7_7, 1_7_8_9_2, 3_3, 6_0, 8_7, 4, 3_2_3_4, 1_5_7, 6_1, 2_6_6_7, 5_2_3_7_6, 1_9, 8_8, 2_3, 7_3_5]
# fmt: on
self.assertListEqual(lowerCAmelCase_ , self.big_tokenizer.encode(lowerCAmelCase_ ) )
@slow
def __magic_name__ ( self : Dict ):
"""simple docstring"""
_A: str = {
'''input_ids''': [[2, 1_0_8_8_2_5, 1_1_6_3, 1_5, 8_8_0_1_0, 4_7_3, 1_5_8_9_8, 1_5_7, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 2_3_8_0_2_1, 1_1_6_3, 5_3, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 5_3_2_8_3, 1_8_2_3_9_6, 8, 1_8_5_6_6, 1_6, 3_6_7_3_3, 4_1_0_1, 8, 2_3_0, 2_4_4_0_1_7, 1_2_2_5_5_3, 7, 1_5, 1_3_2_5_9_7, 4, 2_9_3, 1_2_5_1_1, 7_6_1_0, 4, 3_4_1_4, 1_3_2_5_9_7, 9, 4, 3_2_3_6_1, 3_6_2, 4, 7_3_4, 2_8_5_1_2, 3_2_5_6_9, 1_8, 4, 3_2_3_6_1, 2_6_0_9_6, 1_4_9_8_2, 7_3, 1_8_7_1_5, 2_1_4_3_3, 2_3_5_2_6_1, 1_5, 4_9_2, 1_2_4_2_7, 1_6, 5_3, 1_8_7_1_5, 2_1_4_3_3, 6_5_4_5_4, 1_5, 2_3_6_5_9, 5_6_3, 1_6, 2_7_8, 5_9_7, 2_8_4_3, 5_9_5, 7_9_3_1, 1_8_2_3_9_6, 6_4_1_8_6, 2_2, 8_8_6, 5_9_5, 1_3_2_9_8_1, 5_3, 2_5_5_4_0, 3_4_4_9, 4_3_9_8_2, 3_9_9_0_1, 5_9_5_1, 8_7_8, 3_3_0, 4, 2_7_6_9_4, 8_0_2_6_9, 3_1_2, 5_3, 6_5_1_7, 1_1_7_8_0, 6_1_1, 2_0_4_0_8, 5], [2, 6, 1_3_2_5_9_7, 6_7, 4_2_8_9_7, 3_3, 5_9_2, 8, 1_6_3_7_2_9, 2_5_5_4_0, 3_6_1, 1_3_6_9_9_7, 1_0_9_5_1_4, 1_7_3_2_3_0, 7, 5_0_1, 6_0, 1_0_2_9_1_3, 1_9_6, 5_6_3_1, 2_3_5, 6_3_2_4_3, 4_7_3, 6, 2_3_1_7_5_7, 7_4, 5_2_7_7, 7_9_0_5, 5_3, 3_0_9_5, 3_7_3_1_7, 2_2, 4_5_4, 1_8_3_8_7_4, 5], [2, 2_6_8, 3_1_2_9_8, 4_6_5_3_0, 6, 1_3_2_9_3_5, 4_3_8_3_1, 7, 5_9_7, 3_2, 2_4, 3_6_8_8, 9_8_6_5, 5]],
'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase_ , model_name='''facebook/xglm-564M''' , padding=lowerCAmelCase_ , )
| 354
|
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")
        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 301
| 0
|