| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82 – 53.2k chars) | int64 (0 – 721) | string (91 – 41.9k chars) | int64 (0 – 699) | int64 (0 – 1) |
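For orientation, a hedged sketch of loading a dataset with this schema; the dataset path below is a placeholder, not this dump's real identifier, and the label semantics (1 when the two style ids match) are inferred from the rows that follow:

```python
# Hypothetical loader; "user/code-style-pairs" is a placeholder path.
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")
row = ds[0]
print(row["code"][:120])               # obfuscated source snippet
print(row["code_codestyle"])           # style id of `code` (0-721)
print(row["style_context_codestyle"])  # style id of `style_context` (0-699)
print(row["label"])                    # appears to be 1 iff the two style ids match
```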
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration

mname = "facebook/wmt19-en-de"

tokenizer = FSMTTokenizer.from_pretrained(mname)

# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
    dict(
        d_model=4,
        encoder_layers=1,
        decoder_layers=1,
        encoder_ffn_dim=4,
        decoder_ffn_dim=4,
        encoder_attention_heads=1,
        decoder_attention_heads=1,
    )
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))

# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-de
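As a quick smoke test, a minimal sketch (assuming the script above has been run so `tiny-wmt19-en-de` exists on disk; the tiny model's weights are random, so the output text is meaningless and this only checks the save/load round trip):

```python
# Reload the tiny checkpoint written above. Cast back to fp32 since the saved
# weights are fp16 and some CPU ops do not support half precision.
from transformers import FSMTForConditionalGeneration, FSMTTokenizer

tokenizer = FSMTTokenizer.from_pretrained("tiny-wmt19-en-de")
model = FSMTForConditionalGeneration.from_pretrained("tiny-wmt19-en-de").float()

batch = tokenizer(["Making tiny model"], return_tensors="pt")
generated = model.generate(**batch, max_new_tokens=8)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
```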
| code_codestyle: 454 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}


class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                self.backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
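As a usage sketch (assuming an installed transformers that exposes `DPTConfig`, rather than the relative imports of the file above), exercising both branches of the constructor:

```python
# Sketch: in hybrid mode a default BiT backbone config is created when none is
# supplied; in plain mode the backbone fields stay empty.
from transformers import DPTConfig

plain = DPTConfig()
assert plain.backbone_config is None           # non-hybrid: no backbone

hybrid = DPTConfig(is_hybrid=True)             # default BiT backbone is created
print(type(hybrid.backbone_config).__name__)   # BitConfig
assert isinstance(hybrid.to_dict()["backbone_config"], dict)  # serialized by to_dict
```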
| style_context_codestyle: 560 | label: 0 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Any ,_a : str ,_a : List[Any]=13 ,_a : Any=30 ,_a : Optional[Any]=2 ,_a : str=3 ,_a : Tuple=True ,_a : List[Any]=True ,_a : Any=32 ,_a : List[Any]=5 ,_a : List[str]=4 ,_a : int=37 ,_a : str="gelu" ,_a : Tuple=0.1 ,_a : Optional[Any]=0.1 ,_a : Union[str, Any]=10 ,_a : Dict=0.02 ,_a : str=3 ,_a : int=0.6 ,_a : Any=None ,):
'''simple docstring'''
_a : Any = parent
_a : int = batch_size
_a : Any = image_size
_a : Any = patch_size
_a : Optional[int] = num_channels
_a : List[Any] = is_training
_a : Optional[int] = use_labels
_a : Dict = hidden_size
_a : int = num_hidden_layers
_a : str = num_attention_heads
_a : Any = intermediate_size
_a : Optional[int] = hidden_act
_a : Optional[Any] = hidden_dropout_prob
_a : Dict = attention_probs_dropout_prob
_a : Any = type_sequence_label_size
_a : Optional[int] = initializer_range
_a : List[Any] = mask_ratio
_a : List[Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
_a : List[Any] = (image_size // patch_size) ** 2
_a : int = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : Tuple = None
if self.use_labels:
_a : int = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_a : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : int ):
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_a ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,)
def __lowercase ( self : Optional[int] ,_a : Any ,_a : Optional[int] ,_a : str ):
'''simple docstring'''
_a : Union[str, Any] = ViTMAEModel(config=_a )
model.to(_a )
model.eval()
_a : List[str] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self : Dict ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Dict ):
'''simple docstring'''
_a : int = ViTMAEForPreTraining(_a )
model.to(_a )
model.eval()
_a : List[Any] = model(_a )
_a : List[Any] = (self.image_size // self.patch_size) ** 2
_a : str = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_a : Any = 1
_a : Union[str, Any] = ViTMAEForPreTraining(_a )
model.to(_a )
model.eval()
_a : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_a : str = model(_a )
_a : Optional[int] = self.patch_size**2
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Optional[Any] = self.prepare_config_and_inputs()
_a, _a, _a : List[str] = config_and_inputs
_a : Optional[int] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
__UpperCAmelCase : str = {'''feature-extraction''': ViTMAEModel} if is_torch_available() else {}
__UpperCAmelCase : str = False
__UpperCAmelCase : Dict = False
__UpperCAmelCase : Optional[int] = False
__UpperCAmelCase : int = False
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : List[Any] = ViTMAEModelTester(self )
_a : Tuple = ConfigTester(self ,config_class=_a ,has_text_modality=_a ,hidden_size=37 )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def __lowercase ( self : Any ):
'''simple docstring'''
pass
def __lowercase ( self : Dict ):
'''simple docstring'''
_a, _a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : str = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_a : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a ,nn.Linear ) )
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a, _a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Tuple = model_class(_a )
_a : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Optional[int] = [*signature.parameters.keys()]
_a : str = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_a )
def __lowercase ( self : str ):
'''simple docstring'''
_a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_a )
def __lowercase ( self : List[str] ,_a : Tuple ,_a : List[Any] ,_a : Any ):
'''simple docstring'''
np.random.seed(2 )
_a : str = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
_a : Any = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_a : Tuple = torch.from_numpy(_a )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_a : Dict = pt_noise
super().check_pt_tf_models(_a ,_a ,_a )
def __lowercase ( self : int ):
'''simple docstring'''
_a, _a : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Dict = model_class(_a )
model.to(_a )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
_a : Tuple = model(**self._prepare_for_class(_a ,_a ) )
_a : Any = outputs[0].cpu().numpy()
_a : Any = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_a )
_a : Tuple = model_class.from_pretrained(_a )
model.to(_a )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
_a : List[Any] = model(**self._prepare_for_class(_a ,_a ) )
# Make sure we don't have nans
_a : List[Any] = after_outputs[0].cpu().numpy()
_a : Optional[int] = 0
_a : List[str] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_a ,1E-5 )
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.')
def __lowercase ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.')
def __lowercase ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.')
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def __lowercase ( self : str ):
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __lowercase ( self : Tuple ):
'''simple docstring'''
pass
@slow
def __lowercase ( self : List[str] ):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : int = ViTMAEModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self : Dict ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def __lowercase ( self : Any ):
'''simple docstring'''
np.random.seed(2 )
_a : int = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' ).to(_a )
_a : List[str] = self.default_image_processor
_a : Tuple = prepare_img()
_a : Optional[Any] = image_processor(images=_a ,return_tensors='pt' ).to(_a )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_a : Dict = ViTMAEConfig()
_a : Union[str, Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_a : str = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
_a : Optional[int] = model(**_a ,noise=torch.from_numpy(_a ).to(device=_a ) )
# verify the logits
_a : Any = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape ,_a )
_a : Union[str, Any] = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] ,expected_slice.to(_a ) ,atol=1E-4 ) )
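The sequence-length arithmetic the tester relies on (`(image_size // patch_size) ** 2` patches, then `ceil((1 - mask_ratio) * (num_patches + 1))` kept tokens, the +1 being [CLS]) can be checked standalone with the tester's defaults:

```python
# Worked check of the ViTMAE sequence-length arithmetic used by the tester.
import math

image_size, patch_size, mask_ratio = 30, 2, 0.6
num_patches = (image_size // patch_size) ** 2                      # 15**2 = 225
seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))  # ceil(0.4 * 226)
print(num_patches, seq_length)                                     # 225 91
```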
| code_codestyle: 319 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_squeezebert"] = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
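The file relies on `transformers.utils._LazyModule` so the heavy torch submodule is only imported on first attribute access. A stripped-down, illustrative sketch of that mechanism (not the actual transformers implementation):

```python
# Illustrative lazy-module sketch: attributes resolve their submodule on first use.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # the real implementation raises AttributeError on unknown names
        module_name = self._attr_to_module[attr]
        module = importlib.import_module("." + module_name, self.__name__)
        return getattr(module, attr)
```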
| style_context_codestyle: 319 | label: 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
},
"tokenizer_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}
SPIECE_UNDERLINE = "▁"


class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. it includes the space before it
        # and is included in the raw text; there should be a match in a non-normalized
        # sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
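A short usage sketch (assuming the `albert-base-v2` checkpoint is reachable) showing the `[CLS] ... [SEP] ... [SEP]` layout produced by the two helpers above:

```python
# Sketch: sentence-pair encoding with the fast ALBERT tokenizer.
from transformers import AlbertTokenizerFast

tok = AlbertTokenizerFast.from_pretrained("albert-base-v2")
pair = tok("first sentence", "second one")
print(tok.convert_ids_to_tokens(pair["input_ids"])[0])  # '[CLS]'
print(pair["token_type_ids"])  # 0s for the first segment, 1s for the second
```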
| code_codestyle: 599 |
"""simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
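A small sanity check of the derived attribute (assuming an installed transformers exposing `EfficientNetConfig`):

```python
# num_hidden_layers is derived, not passed: sum(num_block_repeats) * 4.
from transformers import EfficientNetConfig

config = EfficientNetConfig()
print(config.num_hidden_layers)  # (1+2+2+3+3+4+1) * 4 = 64
print(config.image_size)         # 600 (b7-style default)
```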
| style_context_codestyle: 277 | label: 0 |
"""simple docstring"""
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
__lowerCAmelCase = 0
__lowerCAmelCase = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__lowerCAmelCase = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
__lowerCAmelCase = tuple[int, int]
class _lowerCAmelCase :
def __init__( self : Tuple , a : int , a : int , a : int , a : int , a : int , a : Node | None , ) -> None:
"""simple docstring"""
lowercase = pos_x
lowercase = pos_y
lowercase = (pos_y, pos_x)
lowercase = goal_x
lowercase = goal_y
lowercase = g_cost
lowercase = parent
lowercase = self.calculate_heuristic()
lowercase = self.g_cost + self.h_cost
def _lowerCAmelCase ( self : Tuple ) -> float:
"""simple docstring"""
lowercase = self.pos_x - self.goal_x
lowercase = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(a ) + abs(a )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self : List[Any] , a : Node ) -> bool:
"""simple docstring"""
return self.f_cost < other.f_cost
class _lowerCAmelCase :
def __init__( self : List[Any] , a : TPosition , a : TPosition ) -> List[Any]:
"""simple docstring"""
lowercase = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , a )
lowercase = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , a )
lowercase = [self.start]
lowercase = []
lowercase = False
def _lowerCAmelCase ( self : str ) -> list[TPosition]:
"""simple docstring"""
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
lowercase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(a )
self.closed_nodes.append(a )
lowercase = self.get_successors(a )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(a )
else:
# retrieve the best current path
lowercase = self.open_nodes.pop(self.open_nodes.index(a ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(a )
else:
self.open_nodes.append(a )
return [self.start.pos]
def _lowerCAmelCase ( self : Tuple , a : Node ) -> list[Node]:
"""simple docstring"""
lowercase = []
for action in delta:
lowercase = parent.pos_x + action[1]
lowercase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(a ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
a , a , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , a , ) )
return successors
def _lowerCAmelCase ( self : List[str] , a : Node | None ) -> list[TPosition]:
"""simple docstring"""
lowercase = node
lowercase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
lowercase = current_node.parent
path.reverse()
return path
class _lowerCAmelCase :
def __init__( self : str , a : TPosition , a : TPosition ) -> None:
"""simple docstring"""
lowercase = AStar(a , a )
lowercase = AStar(a , a )
lowercase = False
def _lowerCAmelCase ( self : int ) -> list[TPosition]:
"""simple docstring"""
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
lowercase = self.fwd_astar.open_nodes.pop(0 )
lowercase = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
a , a )
self.fwd_astar.closed_nodes.append(a )
self.bwd_astar.closed_nodes.append(a )
lowercase = current_bwd_node
lowercase = current_fwd_node
lowercase = {
self.fwd_astar: self.fwd_astar.get_successors(a ),
self.bwd_astar: self.bwd_astar.get_successors(a ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(a )
else:
# retrieve the best current path
lowercase = astar.open_nodes.pop(
astar.open_nodes.index(a ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(a )
else:
astar.open_nodes.append(a )
return [self.fwd_astar.start.pos]
def _lowerCAmelCase ( self : Dict , a : Node , a : Node ) -> list[TPosition]:
"""simple docstring"""
lowercase = self.fwd_astar.retrace_path(a )
lowercase = self.bwd_astar.retrace_path(a )
bwd_path.pop()
bwd_path.reverse()
lowercase = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
__lowerCAmelCase = (0, 0)
__lowerCAmelCase = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__lowerCAmelCase = time.time()
__lowerCAmelCase = AStar(init, goal)
__lowerCAmelCase = a_star.search()
__lowerCAmelCase = time.time() - start_time
print(f'''AStar execution time = {end_time:f} seconds''')
__lowerCAmelCase = time.time()
__lowerCAmelCase = BidirectionalAStar(init, goal)
__lowerCAmelCase = time.time() - bd_start_time
print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
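A standalone check of the two heuristics defined above, for a start of (0, 0) and the demo grid's goal of (6, 6):

```python
# With HEURISTIC == 1 the search uses manhattan distance; otherwise euclidean.
from math import sqrt

dx, dy = 6 - 0, 6 - 0
manhattan = abs(dx) + abs(dy)    # 12
euclidean = sqrt(dx**2 + dy**2)  # ~8.49
print(manhattan, round(euclidean, 2))
```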
| code_codestyle: 708 |
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class _lowerCAmelCase ( nn.Module ):
__lowerCAmelCase : int
__lowerCAmelCase : int
__lowerCAmelCase : float = 0.0
__lowerCAmelCase : int = 1
__lowerCAmelCase : int = 1
__lowerCAmelCase : bool = True
__lowerCAmelCase : bool = False
__lowerCAmelCase : bool = False
__lowerCAmelCase : bool = False
__lowerCAmelCase : jnp.dtype = jnp.floataa
def _lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowercase = []
lowercase = []
for i in range(self.num_layers ):
lowercase = self.in_channels if i == 0 else self.out_channels
lowercase = FlaxResnetBlockaD(
in_channels=a , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(a )
lowercase = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(a )
lowercase = resnets
lowercase = attentions
if self.add_downsample:
lowercase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : List[str] , a : Tuple , a : Union[str, Any] , a : Tuple , a : str=True ) -> Tuple:
"""simple docstring"""
lowercase = ()
for resnet, attn in zip(self.resnets , self.attentions ):
lowercase = resnet(a , a , deterministic=a )
lowercase = attn(a , a , deterministic=a )
output_states += (hidden_states,)
if self.add_downsample:
lowercase = self.downsamplers_a(a )
output_states += (hidden_states,)
return hidden_states, output_states
class _lowerCAmelCase ( nn.Module ):
__lowerCAmelCase : int
__lowerCAmelCase : int
__lowerCAmelCase : float = 0.0
__lowerCAmelCase : int = 1
__lowerCAmelCase : bool = True
__lowerCAmelCase : jnp.dtype = jnp.floataa
def _lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
lowercase = []
for i in range(self.num_layers ):
lowercase = self.in_channels if i == 0 else self.out_channels
lowercase = FlaxResnetBlockaD(
in_channels=a , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(a )
lowercase = resnets
if self.add_downsample:
lowercase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : int , a : Tuple , a : List[Any] , a : Optional[Any]=True ) -> Tuple:
"""simple docstring"""
lowercase = ()
for resnet in self.resnets:
lowercase = resnet(a , a , deterministic=a )
output_states += (hidden_states,)
if self.add_downsample:
lowercase = self.downsamplers_a(a )
output_states += (hidden_states,)
return hidden_states, output_states
class _lowerCAmelCase ( nn.Module ):
__lowerCAmelCase : int
__lowerCAmelCase : int
__lowerCAmelCase : int
__lowerCAmelCase : float = 0.0
__lowerCAmelCase : int = 1
__lowerCAmelCase : int = 1
__lowerCAmelCase : bool = True
__lowerCAmelCase : bool = False
__lowerCAmelCase : bool = False
__lowerCAmelCase : bool = False
__lowerCAmelCase : jnp.dtype = jnp.floataa
def _lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase = []
lowercase = []
for i in range(self.num_layers ):
lowercase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
lowercase = self.prev_output_channel if i == 0 else self.out_channels
lowercase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(a )
lowercase = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(a )
lowercase = resnets
lowercase = attentions
if self.add_upsample:
lowercase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : str , a : Optional[int] , a : Optional[int] , a : Optional[int] , a : List[str] , a : Dict=True ) -> List[Any]:
"""simple docstring"""
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
lowercase = res_hidden_states_tuple[-1]
lowercase = res_hidden_states_tuple[:-1]
lowercase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
lowercase = resnet(a , a , deterministic=a )
lowercase = attn(a , a , deterministic=a )
if self.add_upsample:
lowercase = self.upsamplers_a(a )
return hidden_states
class _lowerCAmelCase ( nn.Module ):
__lowerCAmelCase : int
__lowerCAmelCase : int
__lowerCAmelCase : int
__lowerCAmelCase : float = 0.0
__lowerCAmelCase : int = 1
__lowerCAmelCase : bool = True
__lowerCAmelCase : jnp.dtype = jnp.floataa
def _lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
lowercase = []
for i in range(self.num_layers ):
lowercase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
lowercase = self.prev_output_channel if i == 0 else self.out_channels
lowercase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(a )
lowercase = resnets
if self.add_upsample:
lowercase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : int , a : Any , a : Any , a : Tuple , a : Dict=True ) -> Optional[Any]:
"""simple docstring"""
for resnet in self.resnets:
# pop res hidden states
lowercase = res_hidden_states_tuple[-1]
lowercase = res_hidden_states_tuple[:-1]
lowercase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
lowercase = resnet(a , a , deterministic=a )
if self.add_upsample:
lowercase = self.upsamplers_a(a )
return hidden_states
class _lowerCAmelCase ( nn.Module ):
__lowerCAmelCase : int
__lowerCAmelCase : float = 0.0
__lowerCAmelCase : int = 1
__lowerCAmelCase : int = 1
__lowerCAmelCase : bool = False
__lowerCAmelCase : bool = False
__lowerCAmelCase : jnp.dtype = jnp.floataa
def _lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
# there is always at least one resnet
lowercase = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
lowercase = []
for _ in range(self.num_layers ):
lowercase = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(a )
lowercase = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(a )
lowercase = resnets
lowercase = attentions
def __call__( self : List[Any] , a : Optional[int] , a : Tuple , a : List[Any] , a : List[str]=True ) -> Optional[Any]:
"""simple docstring"""
lowercase = self.resnets[0](a , a )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
lowercase = attn(a , a , deterministic=a )
lowercase = resnet(a , a , deterministic=a )
return hidden_states
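The up blocks consume the down blocks' skip activations by popping the saved residual and concatenating it on the channel axis. A minimal standalone jax.numpy sketch of that mechanic (shapes are illustrative NHWC, not taken from the file above):

```python
# Skip-connection mechanic used by the up blocks: pop a residual, concat channels.
import jax.numpy as jnp

hidden_states = jnp.zeros((1, 8, 8, 64))             # NHWC, as Flax blocks use
res_hidden_states_tuple = (jnp.zeros((1, 8, 8, 32)),)

res_hidden_states = res_hidden_states_tuple[-1]
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
merged = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
print(merged.shape)                                   # (1, 8, 8, 96)
```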
| style_context_codestyle: 396 | label: 0 |
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A_ :
def __init__( self: Dict ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: int=13 ,__lowerCAmelCase: List[str]=30 ,__lowerCAmelCase: List[str]=2 ,__lowerCAmelCase: Dict=3 ,__lowerCAmelCase: Tuple=True ,__lowerCAmelCase: List[str]=True ,__lowerCAmelCase: Optional[Any]=32 ,__lowerCAmelCase: List[Any]=5 ,__lowerCAmelCase: int=4 ,__lowerCAmelCase: Optional[int]=37 ,__lowerCAmelCase: Dict="gelu" ,__lowerCAmelCase: str=0.1 ,__lowerCAmelCase: List[str]=0.1 ,__lowerCAmelCase: Optional[Any]=10 ,__lowerCAmelCase: List[str]=0.02 ,__lowerCAmelCase: Union[str, Any]=3 ,__lowerCAmelCase: Tuple=0.6 ,__lowerCAmelCase: Dict=None ,):
'''simple docstring'''
_lowerCamelCase : Optional[int] = parent
_lowerCamelCase : Any = batch_size
_lowerCamelCase : Any = image_size
_lowerCamelCase : List[str] = patch_size
_lowerCamelCase : Union[str, Any] = num_channels
_lowerCamelCase : List[str] = is_training
_lowerCamelCase : str = use_labels
_lowerCamelCase : List[Any] = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : Optional[int] = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Union[str, Any] = hidden_dropout_prob
_lowerCamelCase : Any = attention_probs_dropout_prob
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : Dict = mask_ratio
_lowerCamelCase : List[Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
_lowerCamelCase : str = (image_size // patch_size) ** 2
_lowerCamelCase : Dict = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : int = None
if self.use_labels:
_lowerCamelCase : Any = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowerCamelCase : str = self.get_config()
return config, pixel_values, labels
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=__lowerCAmelCase ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,)
def _lowercase ( self: Any ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : Any = ViTMAEModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self: List[str] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: int ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : List[Any] = ViTMAEForPreTraining(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Dict = model(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = (self.image_size // self.patch_size) ** 2
_lowerCamelCase : Optional[int] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_lowerCamelCase : str = 1
_lowerCamelCase : Tuple = ViTMAEForPreTraining(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase )
_lowerCamelCase : Any = self.patch_size**2
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : int = self.prepare_config_and_inputs()
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = config_and_inputs
_lowerCamelCase : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class A_ ( _a , _a , unittest.TestCase ):
lowerCAmelCase__ = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
lowerCAmelCase__ = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : int = ViTMAEModelTester(self )
_lowerCamelCase : List[str] = ConfigTester(self ,config_class=__lowerCAmelCase ,has_text_modality=__lowerCAmelCase ,hidden_size=37 )
def _lowercase ( self: List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
pass
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Optional[Any] = model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_lowerCamelCase : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase ,nn.Linear ) )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Dict = model_class(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Optional[Any] = [*signature.parameters.keys()]
_lowerCamelCase : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase )
def _lowercase ( self: Any ,__lowerCAmelCase: int ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
np.random.seed(2 )
_lowerCamelCase : Optional[Any] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
_lowerCamelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCamelCase : Union[str, Any] = torch.from_numpy(__lowerCAmelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_lowerCamelCase : Dict = pt_noise
super().check_pt_tf_models(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : List[str] = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
_lowerCamelCase : int = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
_lowerCamelCase : Any = outputs[0].cpu().numpy()
_lowerCamelCase : Union[str, Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : str = model_class.from_pretrained(__lowerCAmelCase )
model.to(__lowerCAmelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
_lowerCamelCase : Dict = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
# Make sure we don't have nans
_lowerCamelCase : Union[str, Any] = after_outputs[0].cpu().numpy()
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : List[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__lowerCAmelCase ,1e-5 )
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.")
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.")
def _lowercase ( self: str ):
'''simple docstring'''
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.")
def _lowercase ( self: Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def _lowercase ( self: int ):
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowercase ( self: Dict ):
'''simple docstring'''
pass
@slow
def _lowercase ( self: Dict ):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Optional[Any] = ViTMAEModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def lowerCamelCase_( ) -> str:
'''simple docstring'''
_lowerCamelCase : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
@cached_property
def _lowercase ( self: str ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def _lowercase ( self: int ):
'''simple docstring'''
np.random.seed(2 )
_lowerCamelCase : List[str] = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(__lowerCAmelCase )
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : int = prepare_img()
_lowerCamelCase : Tuple = image_processor(images=__lowerCAmelCase ,return_tensors="pt" ).to(__lowerCAmelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_lowerCamelCase : Tuple = ViTMAEConfig()
_lowerCamelCase : Union[str, Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_lowerCamelCase : Optional[Any] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
_lowerCamelCase : Dict = model(**__lowerCAmelCase ,noise=torch.from_numpy(__lowerCAmelCase ).to(device=__lowerCAmelCase ) )
# verify the logits
_lowerCamelCase : Any = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape ,__lowerCAmelCase )
_lowerCamelCase : Tuple = torch.tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] ,expected_slice.to(__lowerCAmelCase ) ,atol=1e-4 ) )
| code_codestyle: 46 |
def text_justification(word: str, max_width: int) -> list:
    """
    Will format the string such that each line has exactly
    (max_width) characters and is fully (left and right) justified,
    and the last line is left-justified.

    >>> text_justification("This is an example of text justification.", 16)
    ['This    is    an', 'example  of text', 'justification.  ']
    """
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = overall_spaces_count % spaces_to_insert_between_words
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(inner_word)
            width += len(inner_word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [inner_word], len(inner_word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
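For instance, at width 20 the same sentence justifies as:

```python
# Every line except the last is padded to exactly max_width characters.
for line in text_justification("This is an example of text justification.", 20):
    print(repr(line))
# 'This  is  an example'
# 'of              text'
# 'justification.      '
```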
| style_context_codestyle: 154 | label: 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
    import tensorflow as tf

    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM


@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
| code_codestyle: 576 |
from __future__ import annotations

from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum sum over all non-empty subsequences
    (elements may be skipped, so the optimum keeps every positive entry).

    >>> max_subsequence_sum([1, 2, 3, 4, -2])
    10
    >>> max_subsequence_sum([-2, -3, -1, -4, -6])
    -1
    """
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)

    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
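A quick check on a mixed-sign input: since elements may be skipped, the optimum keeps exactly the positive entries, 1 + 4 + 2 + 1 + 4 = 12:

```python
print(max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]))  # 12
```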
| style_context_codestyle: 576 | label: 1 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
lowercase_ : List[Any] = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class _lowerCamelCase ( unittest.TestCase ):
__a = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__a = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
__a = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
__a = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Optional[int]:
SCREAMING_SNAKE_CASE__: str= ZeroShotClassificationPipeline(
model=lowerCAmelCase , tokenizer=lowerCAmelCase , candidate_labels=['''polics''', '''health'''] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, examples):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")

        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")

        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")

        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)

        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )

        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
    @require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"]
        )
    @require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )
    @require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="tf",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
            " large and limited training data.",
            candidate_labels=["machine learning", "statistics", "translation", "vision"],
            multi_label=True,
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
    @slow
    @require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
            " large and limited training data.",
            candidate_labels=["machine learning", "statistics", "translation", "vision"],
            multi_label=True,
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
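# Hedged usage sketch outside the test harness; the checkpoint and candidate
# labels below are illustrative choices, not taken from the test file.
if __name__ == "__main__":
    classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
    result = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "sports"])
    # With multi_label=False (the default) the scores form a softmax over labels and sum to 1.
    print(result["labels"][0], result["scores"][0])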
| 64
|
'''simple docstring'''
from typing import Optional

from torch import nn

from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None):
        """simple docstring"""
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm)
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True):
        """simple docstring"""
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states, encoder_hidden_states=condition_state, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, return_dict=False)[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
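# Minimal forward-pass sketch; every size below is an assumption chosen only to
# satisfy the hard-coded 77 + 257 condition lengths, not a value from the source.
if __name__ == "__main__":
    import torch

    model = DualTransformer2DModel(num_attention_heads=2, attention_head_dim=8, in_channels=16, norm_num_groups=8, cross_attention_dim=32)
    hidden = torch.randn(1, 16, 8, 8)
    # 77 text tokens followed by 257 image-context tokens, per `condition_lengths`.
    context = torch.randn(1, 77 + 257, 32)
    out = model(hidden, context, return_dict=False)[0]
    print(out.shape)  # torch.Size([1, 16, 8, 8])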
| 507
| 0
|
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser) -> None:
        """simple docstring"""
        download_parser = parser.add_parser('env')
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            '--accelerate-config_file', default=None, help='The accelerate config file to use for the default values in the launching script.', )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args) -> None:
        """simple docstring"""
        self._accelerate_config_file = accelerate_config_file
    def run(self) -> dict:
        """simple docstring"""
        safetensors_version = 'not installed'
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec('safetensors') is not None:
            import safetensors

            safetensors_version = f'{safetensors.__version__} but is ignored because of PyTorch version too old.'

        accelerate_version = 'not installed'
        accelerate_config = accelerate_config_str = 'not found'
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                '\n'.join([f'\t- {prop}: {val}' for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f'\t{accelerate_config}'
            )

        pt_version = 'not installed'
        pt_cuda_available = 'NA'
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = 'not installed'
        tf_cuda_available = 'NA'
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices('GPU'))

        flax_version = 'not installed'
        jax_version = 'not installed'
        jaxlib_version = 'not installed'
        jax_backend = 'NA'
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            '`transformers` version': version,
            'Platform': platform.platform(),
            'Python version': platform.python_version(),
            'Huggingface_hub version': huggingface_hub.__version__,
            'Safetensors version': f'{safetensors_version}',
            'Accelerate version': f'{accelerate_version}',
            'Accelerate config': f'{accelerate_config_str}',
            'PyTorch version (GPU?)': f'{pt_version} ({pt_cuda_available})',
            'Tensorflow version (GPU?)': f'{tf_version} ({tf_cuda_available})',
            'Flax version (CPU?/GPU?/TPU?)': f'{flax_version} ({jax_backend})',
            'Jax version': f'{jax_version}',
            'JaxLib version': f'{jaxlib_version}',
            'Using GPU in script?': '<fill in>',
            'Using distributed or parallel set-up in script?': '<fill in>',
        }

        print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n')
        print(self.format_dict(info))

        return info
    @staticmethod
    def format_dict(d: dict) -> str:
        """simple docstring"""
        return "\n".join([f'- {prop}: {val}' for prop, val in d.items()]) + "\n"
| 144
|
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(taken: int = 20) -> str:
    """Expected number of distinct colours among `taken` balls drawn at random."""
    total = math.comb(NUM_BALLS, taken)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f'{result:.9f}'


if __name__ == "__main__":
    print(solution(20))
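# Worked check (follows from the constants above by linearity of expectation):
# P(a fixed colour is absent from 20 draws) = C(60, 20) / C(70, 20), so
# E[distinct colours] = 7 * (1 - C(60, 20) / C(70, 20)) ≈ 6.818741802.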
| 144
| 1
|
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """A CLIPTokenizer that maps one placeholder token to several learned sub-tokens."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)

        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent"
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
| 67
|
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    """simple docstring"""

    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('DownBlock2D', 'AttnDownBlock2D'), up_block_types=('AttnUpBlock2D', 'UpBlock2D'), )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type='numpy').images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type='numpy', return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""

    def test_full_inference(self):
        model_id = 'google/ncsnpp-celebahq-256'
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type='numpy').images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 1
|
'''simple docstring'''
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(coefficient_matrix: NDArray[float64], constant_matrix: NDArray[float64], init_val: list[float], iterations: int) -> list[float]:
    '''simple docstring'''
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1)

    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    '''simple docstring'''
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError('Coefficient matrix is not strictly diagonally dominant')

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
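# Hedged usage sketch: a strictly diagonally dominant 3x3 system chosen for
# illustration (three sweeps are enough to see the iterates settling).
if __name__ == "__main__":
    coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
    constant = np.array([[2.0], [-6.0], [-4.0]])
    print(jacobi_iteration_method(coefficient, constant, init_val=[0.5, -0.5, -0.5], iterations=3))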
| 665
|
'''simple docstring'''
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(coefficient_matrix: NDArray[float64], constant_matrix: NDArray[float64], init_val: list[float], iterations: int) -> list[float]:
    '''simple docstring'''
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1)

    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    '''simple docstring'''
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError('Coefficient matrix is not strictly diagonally dominant')

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 665
| 1
|
import argparse
import os
import re

import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints

from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path):
    """simple docstring"""
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    """simple docstring"""
    converted_dict = {}

    CONVERSION_MAPPING = {
        'token_embedder': 'embeddings',
        'encoder_norm': 'layernorm',
        'kernel': 'weight',
        '.out': '.output',
        'scale': 'weight',
        'embedders_0.pos_embedding': 'row_embedder.weight',
        'embedders_1.pos_embedding': 'column_embedder.weight',
    }

    DECODER_CONVERSION_MAPPING = {
        'query': 'attention.query',
        'key': 'attention.key',
        'value': 'attention.value',
        'output.dense': 'output',
        'encoder_decoder_attention.o': 'encoder_decoder_attention.attention.o',
        'pre_self_attention_layer_norm': 'self_attention.layer_norm',
        'pre_cross_attention_layer_norm': 'encoder_decoder_attention.layer_norm',
        'mlp.': 'mlp.DenseReluDense.',
        'pre_mlp_layer_norm': 'mlp.layer_norm',
        'self_attention.o': 'self_attention.attention.o',
        'decoder.embeddings.embedding': 'decoder.embed_tokens.weight',
        'decoder.relpos_bias.rel_embedding': 'decoder.layer.0.self_attention.attention.relative_attention_bias.weight',
        'decoder.decoder_norm.weight': 'decoder.final_layer_norm.weight',
        'decoder.logits_dense.weight': 'decoder.lm_head.weight',
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = '.'.join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r'layers_(\d+)', r'layer.\1', new_key)
                new_key = new_key.replace('encoder', 'encoder.encoder')

            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r'layers_(\d+)', r'layer.\1', new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf(t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False):
    """simple docstring"""
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18)
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa)

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained('ybelkada/test-pix2struct-tokenizer')
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print('Model saved in {}'.format(pytorch_dump_folder_path))
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument("""--t5x_checkpoint_path""", default=None, type=str, help="""Path to the original T5x checkpoint.""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--use_large""", action="""store_true""", help="""Use large model.""")
parser.add_argument("""--is_vqa""", action="""store_true""", help="""Use large model.""")
_A = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
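# Example invocation (paths are placeholders):
#   python convert_pix2struct_checkpoint.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --pytorch_dump_folder_path ./pix2struct-base
# Add --use_large when converting the large variant so the 1536-wide configs are used.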
| 258
|
def encrypt(input_string: str, key: int) -> str:
    """simple docstring"""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError('Height of grid can\'t be 0 or negative')
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = [''.join(row) for row in temp_grid]
    output_string = ''.join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    """simple docstring"""
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError('Height of grid can\'t be 0 or negative')
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append('*')

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ''  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """simple docstring"""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
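# Example round trip (plaintext and key are illustrative):
if __name__ == "__main__":
    ciphertext = encrypt("Hello, World!", 4)
    # decrypt() rebuilds the zigzag template, fills it row by row, then reads it back as a zigzag.
    assert decrypt(ciphertext, 4) == "Hello, World!"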
| 258
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_owlvit'] = ['OwlViTFeatureExtractor']
    _import_structure['image_processing_owlvit'] = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_owlvit'] = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 417
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : Dict = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
__UpperCamelCase : Optional[Any] = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
__UpperCamelCase : Tuple = {
'''RUCAIBox/mvp''': 1024,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'])
            if "cls" in state:
                state['cls'] = tuple(state['cls'])

            changes_to_apply = False

            if state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True

            if state.get('trim_offsets', trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                'to use it with pretokenized inputs.')

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                'to use it with pretokenized inputs.')

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 417
| 1
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    """simple docstring"""
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = 'huggingface/label-files'
    filename = 'coco-detection-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False):
    """simple docstring"""
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    """simple docstring"""
    if "backbone" in name:
        name = name.replace('backbone', 'vit')
    if "cls_token" in name:
        name = name.replace('cls_token', 'embeddings.cls_token')
    if "det_token" in name:
        name = name.replace('det_token', 'embeddings.detection_tokens')
    if "mid_pos_embed" in name:
        name = name.replace('mid_pos_embed', 'encoder.mid_position_embeddings')
    if "pos_embed" in name:
        name = name.replace('pos_embed', 'embeddings.position_embeddings')
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "blocks" in name:
        name = name.replace('blocks', 'encoder.layer')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "class_embed" in name:
        name = name.replace('class_embed', 'class_labels_classifier')
    if "bbox_embed" in name:
        name = name.replace('bbox_embed', 'bbox_predictor')
    if "vit.norm" in name:
        name = name.replace('vit.norm', 'vit.layernorm')

    return name
def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_img() -> Image.Image:
    """simple docstring"""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    """simple docstring"""
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != 'yolos_ti' else 512
    image_processor = YolosImageProcessor(format='coco_detection', size=size)
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
if yolos_name == "yolos_ti":
lowerCAmelCase__ :List[str] = torch.tensor(
[[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] )
lowerCAmelCase__ :Union[str, Any] = torch.tensor(
[[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] )
elif yolos_name == "yolos_s_200_pre":
lowerCAmelCase__ :int = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] )
lowerCAmelCase__ :Tuple = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] )
elif yolos_name == "yolos_s_300_pre":
lowerCAmelCase__ :List[str] = torch.tensor(
[[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] )
lowerCAmelCase__ :Dict = torch.tensor(
[[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] )
elif yolos_name == "yolos_s_dWr":
lowerCAmelCase__ :Dict = torch.tensor(
[[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] )
lowerCAmelCase__ :Optional[int] = torch.tensor(
[[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] )
elif yolos_name == "yolos_base":
lowerCAmelCase__ :str = torch.tensor(
[[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] )
lowerCAmelCase__ :Any = torch.tensor(
[[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] )
else:
raise ValueError(F"Unknown yolos_name: {yolos_name}" )
assert torch.allclose(logits[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(F"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
lowerCAmelCase__ :Union[str, Any] = {
'yolos_ti': 'yolos-tiny',
'yolos_s_200_pre': 'yolos-small',
'yolos_s_300_pre': 'yolos-small-300',
'yolos_s_dWr': 'yolos-small-dwr',
'yolos_base': 'yolos-base',
}
print('Pushing to the hub...' )
lowerCAmelCase__ :Union[str, Any] = model_mapping[yolos_name]
image_processor.push_to_hub(_SCREAMING_SNAKE_CASE , organization='hustvl' )
model.push_to_hub(_SCREAMING_SNAKE_CASE , organization='hustvl' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--yolos_name""",
default="""yolos_s_200_pre""",
type=str,
help=(
"""Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"""
""" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."""
),
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original state dict (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
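# Example invocation (checkpoint path is a placeholder):
#   python convert_yolos_checkpoint.py --yolos_name yolos_s_200_pre \
#       --checkpoint_path ./yolos_s_200_pre.pth --pytorch_dump_folder_path ./yolos-small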
| 93
|
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    """simple docstring"""
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )
def require_version(requirement: str, hint: Optional[str] = None) -> None:
    """simple docstring"""
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r'^[\w_\-\d]+$', requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r'^([^!=<>\s]+)([\s!=<>]{1,2}.+)', requirement)
        if not match:
            raise ValueError(
                'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but'
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(',')  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r'^([\s!=<>]{1,2})(.+)', w)
            if not match:
                raise ValueError(
                    'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,'
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys() )}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = '.'.join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def __A (_SCREAMING_SNAKE_CASE ) ->List[Any]:
"""simple docstring"""
lowerCAmelCase__ :Optional[Any] = 'Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'
return require_version(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
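# A minimal usage sketch (the requirement strings below are illustrative, not
# taken from any real dependency list):
#
#     require_version("numpy")                        # only needs to be installed
#     require_version("tokenizers>=0.11.1,!=0.11.3")  # a comma-separated version range
#     require_version_core("python>=3.8")             # adds the reinstall hint on failure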
| 93
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
"MT5EncoderModel",
"MT5ForConditionalGeneration",
"MT5ForQuestionAnswering",
"MT5Model",
"MT5PreTrainedModel",
"MT5Stack",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
        module_spec=__spec__,
    )
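# How the lazy-module pattern behaves in practice (illustrative): only the names
# registered in `_import_structure` are known at import time; the heavy
# framework-specific modules are loaded on first attribute access, e.g.:
#
#     from transformers.models.mt5 import MT5Config  # cheap, config only
#     from transformers.models.mt5 import MT5Model   # triggers the torch-side import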
| 51
|
"""simple docstring"""
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """Select a maximum-size set of mutually compatible activities (greedy).

    Assumes the activities are sorted by finish time.

    >>> print_max_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9])
    The following activities are selected:
    0,1,3,4,
    """
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
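# Note: the greedy choice is only optimal when activities are sorted by finish
# time. A sketch for unsorted input (illustrative):
#
#     pairs = sorted(zip(start, finish), key=lambda p: p[1])
#     s, f = [p[0] for p in pairs], [p[1] for p in pairs]
#     print_max_activities(s, f)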
| 51
| 1
|
from typing import List

import numpy as np


def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of possible shards according to the input gen_kwargs."""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Spread num_shards over at most max_num_jobs contiguous groups of shard indices."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split gen_kwargs into at most max_num_jobs dicts, sharding every list value."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    """Inverse of _split_gen_kwargs: concatenate the list values of several gen_kwargs dicts."""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Shuffle all list values; lists of the same size get the same permutation so entangled lists stay aligned."""
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
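# Quick usage sketch (illustrative values only): splitting generator kwargs
# across two jobs shards the list-valued keys and leaves scalars untouched.
#
#     gen_kwargs = {"files": ["a.txt", "b.txt", "c.txt"], "split": "train"}
#     jobs = _split_gen_kwargs(gen_kwargs, max_num_jobs=2)
#     # -> [{"files": ["a.txt", "b.txt"], "split": "train"},
#     #     {"files": ["c.txt"], "split": "train"}]
#     assert _merge_gen_kwargs(jobs) == gen_kwargs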
| 345
|
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
_DESCRIPTION = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only works with the CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in columns separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore information on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
_KWARGS_DESCRIPTION = "\nCalculates coreference evaluation metrics.\nArgs:\n    predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n        Each prediction is a word with its annotations as a string made of columns joined with spaces.\n        Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n        See the details on the format in the description of the metric.\n    references: list of sentences. Each sentence is a list of word references to score in the CoNLL format.\n        Each reference is a word with its annotations as a string made of columns joined with spaces.\n        Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n        See the details on the format in the description of the metric.\n    keep_singletons: After extracting all mentions of key or system files,\n        mentions whose corresponding coreference chain is of size one,\n        are considered as singletons. The default evaluation mode will include\n        singletons in evaluations if they are included in the key or the system files.\n        By setting 'keep_singletons=False', all singletons in the key and system files\n        will be excluded from the evaluation.\n    NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n        leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n    min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n        Minimum spans are determined using the MINA algorithm.\n\nReturns:\n    'mentions': mentions\n    'muc': MUC metric [Vilain et al, 1995]\n    'bcub': B-cubed [Bagga and Baldwin, 1998]\n    'ceafe': CEAFe [Luo et al., 2005]\n    'lea': LEA [Moosavi and Strube, 2016]\n    'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n    >>> coval = datasets.load_metric('coval')\n    >>> words = ['bc/cctv/00/cctv_0005   0   0       Thank   VBP  (TOP(S(VP*    thank  01   1    Xu_li  *           (V*)        *       -',\n    ... 'bc/cctv/00/cctv_0005   0   1         you   PRP        (NP*)      -    -   -    Xu_li  *        (ARG1*)   (ARG0*)   (116)',\n    ... 'bc/cctv/00/cctv_0005   0   2    everyone    NN        (NP*)      -    -   -    Xu_li  *    (ARGM-DIS*)        *    (116)',\n    ... 'bc/cctv/00/cctv_0005   0   3         for    IN        (PP*       -    -   -    Xu_li  *         (ARG2*         *       -',\n    ... 'bc/cctv/00/cctv_0005   0   4    watching   VBG   (S(VP*))))   watch  01   1    Xu_li  *             *)       (V*)      -',\n    ... 'bc/cctv/00/cctv_0005   0   5           .     .          *))      -    -   -    Xu_li  *              *         *       -']\n    >>> references = [words]\n    >>> predictions = [words]\n    >>> results = coval.compute(predictions=predictions, references=references)\n    >>> print(results) # doctest:+ELLIPSIS\n    {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos


def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, fa = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += fa
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": fa})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {fa * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores


def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        allmetrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=allmetrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
| 345
| 1
|
'''Open the top Google search result for a query in the default web browser.'''
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    query = '%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input('Search: ')))

    print('Googling.....')

    url = f'https://www.google.com/search?q={query}&num=100'

    res = requests.get(
        url,
        headers={'User-Agent': str(UserAgent().random)},
    )

    try:
        link = (
            BeautifulSoup(res.text, 'html.parser')
            .find('div', attrs={'class': 'yuRUbf'})
            .find('a')
            .get('href')
        )

    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, 'html.parser')
            .find('div', attrs={'class': 'kCrYT'})
            .find('a')
            .get('href')
        )['url'][0]

    webbrowser.open(link)
| 708
|
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    """Translate an original GroupViT checkpoint key into its Hugging Face equivalent."""
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")
    return name
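# Quick sanity check of the mapping above (illustrative key, not from a real
# checkpoint): a vision-encoder block key is rewritten rule by rule.
#
#     rename_key("img_encoder.layers.0.blocks.1.norm1.weight")
#     # -> "vision_model.encoder.stages.0.layers.1.layer_norm1.weight"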
def convert_state_dict(orig_state_dict, config):
    # Target key names below follow the renaming scheme defined in rename_key above.
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            prefix = f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            prefix = f"text_model.encoder.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
def prepare_img():
    """Download an image of two cats on which the conversion is verified."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False
):
    """Copy/paste/tweak the original GroupViT weights into our GroupViT structure."""
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f'Model name {model_name} not supported.')
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
    )
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
    parser.add_argument(
        '--model_name',
        default='groupvit-gcc-yfcc',
        type=str,
        help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
    )
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
        help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
    )

    args = parser.parse_args()
    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
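# Example invocation (all paths are placeholders):
#
#     python <this script> \
#         --checkpoint_path /path/to/groupvit_checkpoint.pth \
#         --pytorch_dump_folder_path ./groupvit-gcc-yfcc \
#         --model_name groupvit-gcc-yfcc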
| 11
| 0
|
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple('_Datasets', ['train', 'validation', 'test'])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data


@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    """Container for a dataset of MNIST images and labels (legacy helper)."""

    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        """Construct a _DataSet.

        `dtype` can be either `uint8` to leave the input as `[0, 255]`, or
        `float32` to rescale into `[0, 1]`. The seed arg provides for
        deterministic testing.
        """
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from source url, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
    with gfile.GFile(filepath) as f:
        size = f.size()
    print("Successfully downloaded", filename, size, "bytes.")
    return filepath


@deprecated(None, "Please use alternatives such as: tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, 'rb') as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, 'rb') as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, 'rb') as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, 'rb') as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
| 305
|
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    """Translate a ParlAI state-dict key into the Hugging Face Blenderbot name."""
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Copy/paste/tweak a ParlAI Blenderbot checkpoint into the Hugging Face structure."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 63
| 0
|
def different_signs(num1: int, num2: int) -> bool:
    """Return True if num1 and num2 have opposite signs.

    >>> different_signs(1, -1)
    True
    >>> different_signs(1, 1)
    False
    """
    return (num1 ^ num2) < 0
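# Why XOR works here (assuming two's-complement integers): the XOR of two
# numbers is negative exactly when their sign bits differ, so the check avoids
# branching or multiplication. A rough equivalent:
#
#     (num1 < 0) != (num2 < 0)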
if __name__ == "__main__":
import doctest
doctest.testmod()
| 708
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of VQModel's encoding method; `latents` holds the encoded sample."""

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    """A VQ-VAE model for encoding images into discrete latents and decoding them back."""

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True):
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True):
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
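# Minimal round-trip sketch (shapes and hyperparameters are illustrative):
#
#     model = VQModel(block_out_channels=(32,), num_vq_embeddings=64)
#     x = torch.randn(1, 3, 32, 32)
#     latents = model.encode(x).latents
#     recon = model.decode(latents).sample  # same shape as x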
| 295
| 0
|
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp: int) -> bool:
    """Return True if `cp` is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True

    return False


def is_chinese(word: str) -> int:
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens: List[str]) -> List[str]:
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set) -> List[str]:
    """Prefix BERT sub-tokens with '##' when they continue an LTP-segmented Chinese word."""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
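# Worked example (illustrative): if LTP segmented "身高" as one word but BERT
# tokenized it as two single characters, the second character is re-marked as a
# subword so whole-word masking treats the pair as one unit.
#
#     add_sub_symbol(["身", "高"], {"身高"})  # -> ["身", "##高"]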
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer) -> List[List[int]]:
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save positions of Chinese subwords that start with ##, which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
required=False,
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""",
required=False,
type=str,
default="""./resources/ltp""",
help="""resources for LTP tokenizer, usually a path""",
)
parser.add_argument(
"""--bert""",
required=False,
type=str,
default="""./resources/robert""",
help="""resources for Bert tokenizer""",
)
parser.add_argument(
"""--save_path""",
required=False,
type=str,
default="""./resources/ref.txt""",
help="""path to save res""",
)
    args = parser.parse_args()
main(args)
| 5
|
"""simple docstring"""
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='relu')
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation='relu'))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation='relu'))
    classifier.add(layers.Dense(units=1, activation='sigmoid'))

    # Compiling the CNN
    classifier.compile(
        optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']
    )

    # Part 2 - Fitting the CNN to the images

    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')

    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )

    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary'
    )

    test_set = test_datagen.flow_from_directory(
        'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary'
    )

    classifier.fit_generator(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )

    classifier.save('cnn.h5')

    # Part 3 - Making new predictions

    test_image = tf.keras.preprocessing.image.load_img(
        'dataset/single_prediction/image.png', target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = 'Normal'
    if result[0][0] == 1:
        prediction = 'Abnormality detected'
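    # To reuse the trained network later without retraining (a sketch using the
    # file saved above):
    #
    #     from tensorflow.keras.models import load_model
    #     classifier = load_model('cnn.h5')
    #     result = classifier.predict(test_image)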
| 224
| 0
|
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def __UpperCamelCase ( _A : str , _A : str , _A : str ) ->Union[str, Any]:
"""simple docstring"""
def get_masked_lm_array(_A : str ):
lowerCamelCase_ =f'masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE'
lowerCamelCase_ =tf.train.load_variable(__snake_case , __snake_case )
if "kernel" in name:
lowerCamelCase_ =array.transpose()
return torch.from_numpy(__snake_case )
def get_encoder_array(_A : str ):
lowerCamelCase_ =f'encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE'
lowerCamelCase_ =tf.train.load_variable(__snake_case , __snake_case )
if "kernel" in name:
lowerCamelCase_ =array.transpose()
return torch.from_numpy(__snake_case )
def get_encoder_layer_array(_A : int , _A : str ):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, orig_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(orig_shape)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape
        )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape
        )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape
        )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape
        )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape
        )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape
        )

        # Self-attention Output
        self_output = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape
        )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape
        )

        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate = layer.intermediate
        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output = layer.output
        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")
        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")
    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")
    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done successfully!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow Token Dropping checkpoint.'
    )
    parser.add_argument(
        '--bert_config_file',
        type=str,
        required=True,
        help='The config json file corresponding to the BERT model. This specifies the model architecture.',
    )
    parser.add_argument(
        '--pytorch_dump_path',
        type=str,
        required=True,
        help='Path to the output PyTorch model.',
    )
    args = parser.parse_args()
    convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 713
|
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class LevitImageProcessor(BaseImageProcessor):
    """Constructs a LeViT-style image processor: optionally resizes so the shortest
    edge becomes (256 / 224) * size, center-crops, rescales and normalizes with the
    ImageNet defaults."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Optional[Dict[str, int]] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN, image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}")
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, Iterable[float]], std: Union[float, Iterable[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: Optional[PILImageResampling] = None, do_center_crop: Optional[bool] = None, crop_size: Optional[Dict[str, int]] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, Iterable[float]]] = None, image_std: Optional[Union[float, Iterable[float]]] = None, return_tensors: Optional[TensorType] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size_dict = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size_dict, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
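
# Hedged usage sketch for the processor above; a random uint8 array stands in
# for a real photo, and running this requires the surrounding transformers
# package because of the relative imports at the top of the module.
if __name__ == "__main__":
    fake_image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
    image_processor = LevitImageProcessor()
    batch = image_processor(images=fake_image, return_tensors="np")
    print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)
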
| 75
| 0
|
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from first characters to child nodes
        self.nodes: dict[str, RadixNode] = {}

        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf

        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Compute the common substring of the node's prefix and a word.

        Returns (common substring, remaining prefix, remaining word).
        """
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1

        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        """Insert many words into the tree."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a word into the tree."""
        # Case 1: The word equals the node's prefix
        # Solution: We mark the current node as a leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word
            )

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert the remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater than or equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        """Return True if the word is in the tree."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word
            )
            # If there is a remaining prefix, the word can't be in the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining, so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        """Delete a word from the tree; return True if it was found."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word
            )
            # If there is a remaining prefix, the word can't be in the tree
            if remaining_prefix != "":
                return False
            # We have word remaining, so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the node if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes

                    return True

    def print_tree(self, height: int = 0) -> None:
        """Print the tree, one node per line."""
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")

        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    """Show an example of a radix tree."""
    words = "banana bananas bandanas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
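
# Illustration of RadixNode.match on overlapping words: "banana" and "bandana"
# share the prefix "ban", leaving "ana" of the stored prefix and "dana" of the
# incoming word.
_node = RadixNode(prefix="banana")
assert _node.match("bandana") == ("ban", "ana", "dana")
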
| 15
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
_SCREAMING_SNAKE_CASE : Optional[Any] = get_tests_dir('''fixtures''')
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : Any ) -> Any:
# A mock response for an HTTP head request to emulate server down
SCREAMING_SNAKE_CASE__ = mock.Mock()
SCREAMING_SNAKE_CASE__ = 500
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = HTTPError
SCREAMING_SNAKE_CASE__ = {}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=__lowerCamelCase ) as mock_head:
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# This check we did call the fake head request
mock_head.assert_called()
def lowercase_ ( self : int ) -> Dict:
# This test is for deprecated behavior and can be removed in v5
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json''' )
@is_staging_test
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def lowercase_ ( cls : Union[str, Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = TOKEN
HfFolder.save_token(__lowerCamelCase )
@classmethod
def lowercase_ ( cls : Optional[int] ) -> Optional[int]:
try:
delete_repo(token=cls._token , repo_id='''test-feature-extractor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-feature-extractor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-feature-extractor''' )
except HTTPError:
pass
def lowercase_ ( self : Optional[Any] ) -> int:
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained(__lowerCamelCase )
feature_extractor.push_to_hub('''test-feature-extractor''' , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained(f'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
__lowerCamelCase , repo_id='''test-feature-extractor''' , push_to_hub=__lowerCamelCase , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained(f'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) )
def lowercase_ ( self : Tuple ) -> Any:
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained(__lowerCamelCase )
feature_extractor.push_to_hub('''valid_org/test-feature-extractor''' , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
__lowerCamelCase , repo_id='''valid_org/test-feature-extractor-org''' , push_to_hub=__lowerCamelCase , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor-org''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) )
def lowercase_ ( self : int ) -> int:
CustomFeatureExtractor.register_for_auto_class()
SCREAMING_SNAKE_CASE__ = CustomFeatureExtractor.from_pretrained(__lowerCamelCase )
feature_extractor.push_to_hub('''test-dynamic-feature-extractor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor'''} , )
SCREAMING_SNAKE_CASE__ = AutoFeatureExtractor.from_pretrained(
f'''{USER}/test-dynamic-feature-extractor''' , trust_remote_code=__lowerCamelCase )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , '''CustomFeatureExtractor''' )
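
# Hedged sketch of the push/reload round trip exercised by the tests above
# (Wav2Vec2FeatureExtractor is the real transformers class behind the
# obfuscated alias used in this file); the repo name, username and token are
# placeholders, and the calls need valid Hub credentials and network access.
if __name__ == "__main__":
    from transformers import Wav2Vec2FeatureExtractor

    extractor = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
    extractor.push_to_hub("test-feature-extractor", use_auth_token="hf_xxx")  # placeholder token
    reloaded = Wav2Vec2FeatureExtractor.from_pretrained("<your-username>/test-feature-extractor")
    assert extractor.to_dict() == reloaded.to_dict()
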
| 493
| 0
|
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein):
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein["aatype"].device
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch):
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
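
# Standalone sketch of the tree_map pattern used above: apply a function to
# every leaf of a nested dict while preserving its structure (this mirrors,
# but is not, the tensor_utils implementation imported at the top).
def simple_tree_map(fn, tree):
    if isinstance(tree, dict):
        return {k: simple_tree_map(fn, v) for k, v in tree.items()}
    return fn(tree)  # leaf: apply the function directly


_demo = {"aatype": np.zeros(8, dtype=np.int64), "coords": {"x": np.ones(3)}}
assert (simple_tree_map(lambda a: a * 2, _demo)["coords"]["x"] == 2.0).all()
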
| 206
|
def perfect(number: int) -> bool:
    """Check whether ``number`` equals the sum of its proper divisors."""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
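
# Quick hedged sanity check: 6, 28, 496 and 8128 are the first four perfect
# numbers, while 2, 12 and 100 are not.
assert all(perfect(n) for n in (6, 28, 496, 8128))
assert not any(perfect(n) for n in (2, 12, 100))
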
if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
| 206
| 1
|
class Graph:
    def __init__(self):
        self.vertex = {}

    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if the vertex is already present
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)
    g.print_graph()
    print("DFS:")
    g.dfs()

    # OUTPUT:
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3
    # DFS:
    # 0 1 2 3
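
# A hedged iterative variant of the same traversal, using an explicit stack
# instead of recursion; visiting order may differ slightly from dfs() above.
def dfs_iterative(graph: Graph) -> None:
    visited = [False] * len(graph.vertex)
    for start in range(len(graph.vertex)):
        stack = [start]
        while stack:
            v = stack.pop()
            if visited[v]:
                continue
            visited[v] = True
            print(v, end=" ")
            # push neighbours; reversed() keeps a left-to-right visiting order
            stack.extend(reversed(graph.vertex.get(v, [])))
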
| 62
|
import logging
from dataclasses import dataclass, field
from typing import Optional

from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments


logger = logging.getLogger(__name__)


@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "Whether to use the Adafactor optimizer."})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
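
# Hedged example of instantiating the arguments above; output_dir is the only
# field required by the base TrainingArguments, the rest fall back to the
# defaults declared in the dataclass. The output path is a placeholder.
if __name__ == "__main__":
    example_args = Seq2SeqTrainingArguments(
        output_dir="./outputs",  # placeholder path
        label_smoothing=0.1,
        sortish_sampler=True,
        predict_with_generate=True,
    )
    print(example_args.lr_scheduler)  # "linear" unless overridden
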
| 556
| 0
|
def stooge_sort(arr):
    """Sort ``arr`` in place with stooge sort and return it."""
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr, i, h):
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, (h - t))
        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, (h))
        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, (h - t))
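
# Hedged cross-check of stooge_sort against Python's built-in sorted() on a
# few random inputs.
import random

for _ in range(25):
    _data = [random.randint(-100, 100) for _ in range(random.randint(0, 15))]
    assert stooge_sort(list(_data)) == sorted(_data)
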
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
| 369
|
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class lowercase__ :
'''simple docstring'''
def __init__( self, __magic_name__, __magic_name__ = 13, __magic_name__ = 64, __magic_name__ = 2, __magic_name__ = 3, __magic_name__ = 3, __magic_name__ = True, __magic_name__ = True, __magic_name__ = 128, __magic_name__=[16, 32, 64, 128], __magic_name__ = 7, __magic_name__ = 4, __magic_name__ = 37, __magic_name__ = "gelu", __magic_name__ = 0.1, __magic_name__ = 0.1, __magic_name__ = 10, __magic_name__ = 0.02, __magic_name__ = 2, __magic_name__ = 1, __magic_name__ = 128, __magic_name__ = [2, 2, 2, 2], __magic_name__ = 2, __magic_name__ = 2, ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ : List[str] = parent
UpperCamelCase__ : Optional[int] = batch_size
UpperCamelCase__ : Optional[Any] = image_size
UpperCamelCase__ : Optional[int] = patch_size
UpperCamelCase__ : Any = num_channels
UpperCamelCase__ : int = is_training
UpperCamelCase__ : str = use_labels
UpperCamelCase__ : Optional[Any] = hidden_size
UpperCamelCase__ : Tuple = num_hidden_layers
UpperCamelCase__ : str = num_attention_heads
UpperCamelCase__ : Any = intermediate_size
UpperCamelCase__ : int = hidden_act
UpperCamelCase__ : Dict = hidden_dropout_prob
UpperCamelCase__ : str = attention_probs_dropout_prob
UpperCamelCase__ : Tuple = type_sequence_label_size
UpperCamelCase__ : Optional[Any] = initializer_range
UpperCamelCase__ : Optional[int] = encoder_stride
UpperCamelCase__ : Any = num_attention_outputs
UpperCamelCase__ : Dict = embed_dim
UpperCamelCase__ : str = embed_dim + 1
UpperCamelCase__ : int = resolution
UpperCamelCase__ : List[str] = depths
UpperCamelCase__ : str = hidden_sizes
UpperCamelCase__ : Tuple = dim
UpperCamelCase__ : Optional[int] = mlp_expansion_ratio
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ : Dict = None
if self.use_labels:
UpperCamelCase__ : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCamelCase__ : int = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
return EfficientFormerConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__magic_name__, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, resolution=self.resolution, depths=self.depths, hidden_sizes=self.hidden_sizes, dim=self.dim, mlp_expansion_ratio=self.mlp_expansion_ratio, )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__ ) -> Any:
"""simple docstring"""
UpperCamelCase__ : Dict = TFEfficientFormerModel(config=__magic_name__ )
UpperCamelCase__ : str = model(__magic_name__, training=__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__ ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = self.type_sequence_label_size
UpperCamelCase__ : Dict = TFEfficientFormerForImageClassification(__magic_name__ )
UpperCamelCase__ : Any = model(__magic_name__, labels=__magic_name__, training=__magic_name__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase__ : Optional[Any] = 1
UpperCamelCase__ : List[str] = TFEfficientFormerForImageClassification(__magic_name__ )
UpperCamelCase__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase__ : Union[str, Any] = model(__magic_name__, labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowercase__ ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
a : int = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
a : Union[str, Any] = (
{
"feature-extraction": TFEfficientFormerModel,
"image-classification": (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
a : Any = False
a : Tuple = False
a : Any = False
a : int = False
a : Tuple = False
def UpperCamelCase__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = TFEfficientFormerModelTester(self )
UpperCamelCase__ : int = ConfigTester(
self, config_class=__magic_name__, has_text_modality=__magic_name__, hidden_size=37 )
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''EfficientFormer does not use inputs_embeds''' )
def UpperCamelCase__ ( self ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason='''EfficientFormer does not support input and output embeddings''' )
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase__ ,UpperCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : List[str] = model_class(__magic_name__ )
UpperCamelCase__ : List[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : List[str] = [*signature.parameters.keys()]
UpperCamelCase__ : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __magic_name__ )
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
def check_hidden_states_output(__magic_name__, __magic_name__, __magic_name__ ):
UpperCamelCase__ : Union[str, Any] = model_class(__magic_name__ )
UpperCamelCase__ : str = model(**self._prepare_for_class(__magic_name__, __magic_name__ ), training=__magic_name__ )
UpperCamelCase__ : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCamelCase__ : Optional[int] = getattr(
self.model_tester, '''expected_num_hidden_layers''', self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__magic_name__ ), __magic_name__ )
if hasattr(self.model_tester, '''encoder_seq_length''' ):
UpperCamelCase__ : Dict = self.model_tester.encoder_seq_length
if hasattr(self.model_tester, '''chunk_length''' ) and self.model_tester.chunk_length > 1:
UpperCamelCase__ : Tuple = seq_length * self.model_tester.chunk_length
else:
UpperCamelCase__ : Optional[Any] = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ), [seq_length, self.model_tester.hidden_size], )
if config.is_encoder_decoder:
UpperCamelCase__ : List[str] = outputs.decoder_hidden_states
                self.assertIsInstance(__magic_name__, (list, tuple))
self.assertEqual(len(__magic_name__ ), __magic_name__ )
UpperCamelCase__ : str = getattr(self.model_tester, '''seq_length''', __magic_name__ )
UpperCamelCase__ : Optional[Any] = getattr(self.model_tester, '''decoder_seq_length''', __magic_name__ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ), [decoder_seq_length, self.model_tester.hidden_size], )
UpperCamelCase__ ,UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : List[str] = True
check_hidden_states_output(__magic_name__, __magic_name__, __magic_name__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase__ : List[Any] = True
check_hidden_states_output(__magic_name__, __magic_name__, __magic_name__ )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__=False ) -> Any:
"""simple docstring"""
UpperCamelCase__ : List[str] = super()._prepare_for_class(__magic_name__, __magic_name__, return_labels=__magic_name__ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
@unittest.skip(reason='''EfficientFormer does not implement masked image modeling yet''' )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__magic_name__ )
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__magic_name__ )
@slow
def UpperCamelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : Dict = TFEfficientFormerModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def UpperCamelCase__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ ,UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ : Dict = True
UpperCamelCase__ : Optional[Any] = getattr(self.model_tester, '''seq_length''', __magic_name__ )
UpperCamelCase__ : Any = getattr(self.model_tester, '''encoder_seq_length''', __magic_name__ )
UpperCamelCase__ : Tuple = getattr(self.model_tester, '''key_length''', __magic_name__ )
UpperCamelCase__ : Union[str, Any] = getattr(self.model_tester, '''chunk_length''', __magic_name__ )
if chunk_length is not None and hasattr(self.model_tester, '''num_hashes''' ):
UpperCamelCase__ : Dict = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
UpperCamelCase__ : Dict = True
UpperCamelCase__ : Optional[int] = False
UpperCamelCase__ : Any = True
UpperCamelCase__ : str = model_class(__magic_name__ )
UpperCamelCase__ : Optional[int] = model(**self._prepare_for_class(__magic_name__, __magic_name__ ), training=__magic_name__ )
UpperCamelCase__ : List[str] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__magic_name__ ), self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCamelCase__ : Optional[int] = True
UpperCamelCase__ : int = model_class(__magic_name__ )
UpperCamelCase__ : str = model(**self._prepare_for_class(__magic_name__, __magic_name__ ), training=__magic_name__ )
UpperCamelCase__ : List[str] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__magic_name__ ), self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], )
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
UpperCamelCase__ ,UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
UpperCamelCase__ : str = model_class(__magic_name__ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
UpperCamelCase__ : Tuple = {
key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=__magic_name__ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
UpperCamelCase__ : str = model(__magic_name__ )
self.assertTrue(outputs_dict is not None )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_tf
@require_vision
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
return (
EfficientFormerImageProcessor.from_pretrained('''snap-research/efficientformer-l1-300''' )
if is_vision_available()
else None
)
@slow
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = TFEfficientFormerForImageClassification.from_pretrained('''snap-research/efficientformer-l1-300''' )
UpperCamelCase__ : Dict = self.default_image_processor
UpperCamelCase__ : List[str] = prepare_img()
UpperCamelCase__ : str = image_processor(images=__magic_name__, return_tensors='''tf''' )
# forward pass
UpperCamelCase__ : Dict = model(**__magic_name__, training=__magic_name__ )
# verify the logits
UpperCamelCase__ : Tuple = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape, __magic_name__ )
UpperCamelCase__ : List[str] = tf.constant([-0.0555, 0.4825, -0.0852] )
self.assertTrue(np.allclose(outputs.logits[0, :3], __magic_name__, atol=1E-4 ) )
@slow
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : str = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'''snap-research/efficientformer-l1-300''' )
UpperCamelCase__ : List[str] = self.default_image_processor
UpperCamelCase__ : Union[str, Any] = prepare_img()
UpperCamelCase__ : int = image_processor(images=__magic_name__, return_tensors='''tf''' )
# forward pass
UpperCamelCase__ : Tuple = model(**__magic_name__, training=__magic_name__ )
# verify the logits
UpperCamelCase__ : Tuple = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape, __magic_name__ )
UpperCamelCase__ : Optional[int] = tf.constant([-0.1312, 0.4353, -1.0499] )
self.assertTrue(np.allclose(outputs.logits[0, :3], __magic_name__, atol=1E-4 ) )
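
# Hedged end-to-end sketch mirroring the integration tests above; it needs
# network access for the checkpoint and the local COCO fixture image used by
# prepare_img().
if __name__ == "__main__":
    image_processor = EfficientFormerImageProcessor.from_pretrained('''snap-research/efficientformer-l1-300''')
    model = TFEfficientFormerForImageClassification.from_pretrained('''snap-research/efficientformer-l1-300''')
    inputs = image_processor(images=prepare_img(), return_tensors='''tf''')
    logits = model(**inputs, training=False).logits
    print("predicted class id:", int(tf.argmax(logits, axis=-1)[0]))
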
| 369
| 1
|
import sys
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def str_eval(s: str) -> int:
    """Return the product of the digits in the given string ``s``."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Find the thirteen adjacent digits in the 1000-digit number ``n``
    with the greatest product and return that product."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(F"""{solution() = }""")
| 68
|
"""simple docstring"""
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'''kakaobrain/align-base''': '''https://huggingface.co/kakaobrain/align-base/resolve/main/config.json''',
}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''align_text_model'''
def __init__( self , _UpperCAmelCase=30522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=0 , _UpperCAmelCase="absolute" , _UpperCAmelCase=True , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__a : int = vocab_size
__a : Optional[int] = hidden_size
__a : Dict = num_hidden_layers
__a : List[Any] = num_attention_heads
__a : Optional[int] = hidden_act
__a : List[Any] = intermediate_size
__a : List[Any] = hidden_dropout_prob
__a : List[str] = attention_probs_dropout_prob
__a : Optional[int] = max_position_embeddings
__a : List[str] = type_vocab_size
__a : Tuple = initializer_range
__a : Dict = layer_norm_eps
__a : Any = position_embedding_type
__a : Dict = use_cache
__a : Dict = pad_token_id
@classmethod
def _lowerCamelCase ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__a , __a : List[str] = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the text config dict if we are loading from AlignConfig
if config_dict.get('''model_type''' ) == "align":
__a : Dict = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''align_vision_model'''
def __init__( self , _UpperCAmelCase = 3 , _UpperCAmelCase = 600 , _UpperCAmelCase = 2.0 , _UpperCAmelCase = 3.1 , _UpperCAmelCase = 8 , _UpperCAmelCase = [3, 3, 5, 3, 5, 5, 3] , _UpperCAmelCase = [32, 16, 24, 40, 80, 112, 192] , _UpperCAmelCase = [16, 24, 40, 80, 112, 192, 320] , _UpperCAmelCase = [] , _UpperCAmelCase = [1, 2, 2, 2, 1, 2, 1] , _UpperCAmelCase = [1, 2, 2, 3, 3, 4, 1] , _UpperCAmelCase = [1, 6, 6, 6, 6, 6, 6] , _UpperCAmelCase = 0.2_5 , _UpperCAmelCase = "swish" , _UpperCAmelCase = 2560 , _UpperCAmelCase = "mean" , _UpperCAmelCase = 0.0_2 , _UpperCAmelCase = 0.0_0_1 , _UpperCAmelCase = 0.9_9 , _UpperCAmelCase = 0.2 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__a : Tuple = num_channels
__a : str = image_size
__a : List[Any] = width_coefficient
__a : Optional[int] = depth_coefficient
__a : Union[str, Any] = depth_divisor
__a : int = kernel_sizes
__a : Dict = in_channels
__a : List[str] = out_channels
__a : Any = depthwise_padding
__a : str = strides
__a : Optional[Any] = num_block_repeats
__a : Optional[Any] = expand_ratios
__a : Any = squeeze_expansion_ratio
__a : int = hidden_act
__a : Union[str, Any] = hidden_dim
__a : Union[str, Any] = pooling_type
__a : Tuple = initializer_range
__a : List[str] = batch_norm_eps
__a : List[Any] = batch_norm_momentum
__a : Union[str, Any] = drop_connect_rate
__a : List[Any] = sum(_UpperCAmelCase ) * 4
@classmethod
def _lowerCamelCase ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__a , __a : Optional[Any] = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get('''model_type''' ) == "align":
__a : Optional[Any] = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''align'''
__lowerCAmelCase = True
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=640 , _UpperCAmelCase=1.0 , _UpperCAmelCase=0.0_2 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
if text_config is None:
__a : Dict = {}
logger.info('''text_config is None. Initializing the AlignTextConfig with default values.''' )
if vision_config is None:
__a : Any = {}
logger.info('''vision_config is None. Initializing the AlignVisionConfig with default values.''' )
__a : Any = AlignTextConfig(**_UpperCAmelCase )
__a : Any = AlignVisionConfig(**_UpperCAmelCase )
__a : Optional[int] = projection_dim
__a : Union[str, Any] = temperature_init_value
__a : int = initializer_range
@classmethod
def _lowerCamelCase ( cls , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Union[str, Any] = copy.deepcopy(self.__dict__ )
__a : Tuple = self.text_config.to_dict()
__a : Union[str, Any] = self.vision_config.to_dict()
__a : int = self.__class__.model_type
return output
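
# Hedged example using the public transformers names these definitions mirror
# (AlignTextConfig / AlignVisionConfig / AlignConfig): build the sub-configs
# explicitly and combine them via the from_text_vision_configs classmethod.
if __name__ == "__main__":
    from transformers import AlignConfig, AlignTextConfig, AlignVisionConfig

    text_config = AlignTextConfig(vocab_size=30522, hidden_size=768)
    vision_config = AlignVisionConfig(image_size=600)
    config = AlignConfig.from_text_vision_configs(text_config, vision_config, projection_dim=640)
    print(config.projection_dim)  # 640
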
| 52
| 0
|
from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # Each slot holds a deque so that multiple values can share one key.
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
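
# Hedged usage sketch, assuming the package's HashTable exposes a
# (size_table, charge_factor) constructor plus insert_data()/keys(), as in the
# TheAlgorithms layout this module comes from; the relative import above means
# this only runs inside that package.
if __name__ == "__main__":
    table = HashTableWithLinkedList(size_table=3, charge_factor=2)
    for value in (10, 20, 30, 40):
        table.insert_data(value)
    print(table.keys())
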
| 720
|
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
A : List[Any] = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
A : List[Any] = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A : List[str] = False
A : List[Any] = False
def snake_case__ ( self : int , _lowerCAmelCase : Dict , _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any]=False ):
__snake_case : int = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
if return_labels:
if model_class in get_values(_lowerCAmelCase ):
__snake_case : List[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self : Any , _lowerCAmelCase : int , _lowerCAmelCase : int=13 , _lowerCAmelCase : int=7 , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Any=True , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : Any=True , _lowerCAmelCase : str=99 , _lowerCAmelCase : Any=32 , _lowerCAmelCase : List[Any]=32 , _lowerCAmelCase : Optional[int]=2 , _lowerCAmelCase : Union[str, Any]=4 , _lowerCAmelCase : Any=37 , _lowerCAmelCase : str="gelu" , _lowerCAmelCase : Dict=0.1 , _lowerCAmelCase : List[Any]=0.1 , _lowerCAmelCase : Dict=5_12 , _lowerCAmelCase : str=16 , _lowerCAmelCase : Union[str, Any]=2 , _lowerCAmelCase : Dict=0.02 , _lowerCAmelCase : Tuple=3 , _lowerCAmelCase : Dict=4 , _lowerCAmelCase : Tuple=None , ):
__snake_case : int = parent
__snake_case : Union[str, Any] = batch_size
__snake_case : Dict = seq_length
__snake_case : Optional[int] = is_training
__snake_case : str = use_input_mask
__snake_case : Optional[Any] = use_token_type_ids
__snake_case : Dict = use_labels
__snake_case : Any = vocab_size
__snake_case : List[str] = hidden_size
__snake_case : Union[str, Any] = num_hidden_layers
__snake_case : Dict = num_attention_heads
__snake_case : Union[str, Any] = intermediate_size
__snake_case : Optional[Any] = hidden_act
__snake_case : str = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : Any = max_position_embeddings
__snake_case : List[str] = type_vocab_size
__snake_case : Tuple = type_sequence_label_size
__snake_case : Dict = initializer_range
__snake_case : List[str] = num_labels
__snake_case : str = num_choices
__snake_case : Optional[int] = scope
__snake_case : Any = embedding_size
def snake_case__ ( self : str ):
__snake_case : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : Dict = None
if self.use_input_mask:
__snake_case : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case : Optional[Any] = None
if self.use_token_type_ids:
__snake_case : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case : str = None
__snake_case : Tuple = None
__snake_case : int = None
if self.use_labels:
__snake_case : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case : str = ids_tensor([self.batch_size] , self.num_choices )
__snake_case : int = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
        )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertForNextSentencePrediction(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(
            result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
        )
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFMobileBertForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFMobileBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFMobileBertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 390
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
_UpperCAmelCase : Optional[Any] = {
'''tanreinama/GPTSAN-2.8B-spout_is_uniform''': (
'''https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json'''
),
}
class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36000, max_position_embeddings=1280, d_model=1024, d_ff=8192, d_ext=4096, d_spout=128,
        num_switch_layers=10, num_ext_layers=0, num_heads=16, num_experts=16, expert_capacity=128,
        dropout_rate=0.0, layer_norm_epsilon=1e-5, router_bias=False, router_jitter_noise=0.0,
        router_dtype="float32", router_ignore_padding_tokens=False, output_hidden_states=False,
        output_attentions=False, initializer_factor=0.002, output_router_logits=False, use_cache=True,
        separator_token_id=35998, pad_token_id=35995, eos_token_id=35999, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, **kwargs
        )
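

# A minimal usage sketch (illustrative values, not part of the original file):
# `attribute_map` above lets generic code read `hidden_size` and friends from
# this config.
#
#   config = GPTSanJapaneseConfig(d_model=512, num_switch_layers=2)
#   assert config.hidden_size == 512  # resolved through attribute_map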
| 72
|
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def _dump_articles(path: Path, articles: list) -> None:
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)

        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        """.split()

        with patch.object(sys, "argv", testargs):
            run_generate()
        assert Path(output_file_name).exists()
        # os.remove(Path(output_file_name))

    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)

    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)
    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])

        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
        expected_strings = [" num_beams | length_penalty", model, "Best score args"]
        un_expected_strings = ["Info"]
        if "translation" in task:
            expected_strings.append("bleu")
        else:
            expected_strings.extend(ROUGE_KEYS)
        for w in expected_strings:
            assert w in cs.out
        for w in un_expected_strings:
            assert w not in cs.out
        assert Path(output_file_name).exists()
        os.remove(Path(output_file_name))
| 651
| 0
|
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class TFCvtModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=64, num_channels=3, embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6], depth=[1, 2, 10], patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1], stride_kv=[2, 2, 2], cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0], initializer_range=0.02, layer_norm_eps=1e-12,
        is_training=True, use_labels=True, num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels,
            embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv,
            depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
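

# Worked example for the defaults above: stage 0 (patch size 7, stride 4, padding 2)
# maps a 64x64 input to 16x16, since floor((64 + 2*2 - 7) / 4 + 1) = floor(16.25) = 16.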
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
@unittest.skip(reason="""Cvt does not output attentions""" )
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
super().test_keras_fit()
@unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" )
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = tf.keras.mixed_precision.Policy("""mixed_float16""" )
tf.keras.mixed_precision.set_global_policy(lowercase_ )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("""float32""" )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(lowercase_ )
lowercase_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase_ )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 700
|
def multiplicative_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)

        steps += 1

    return steps


def additive_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)

        steps += 1

    return steps
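

# Worked examples (illustrative):
#   multiplicative_persistence(39) == 3   # 39 -> 27 -> 14 -> 4
#   additive_persistence(199) == 3        # 199 -> 19 -> 10 -> 1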
if __name__ == "__main__":
import doctest
doctest.testmod()
| 603
| 0
|
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer


TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
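
    # Minimal export sketch (illustrative; assumes a writable "export/" directory):
    #
    #   tokenizer = TFGPT2Tokenizer.from_pretrained(TINY_MODEL_CHECKPOINT)
    #   module = ModelToSave(tokenizer=tokenizer)
    #   tf.saved_model.save(module, "export/", signatures={"serving_default": module.serving})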
@require_tf
@require_keras_nlp
class GPT2TokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
            loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # required for the padding branch of the tokenizer to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
| 569
|
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
    if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
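
# Example invocation (illustrative paths; the actual script filename may differ):
#   python convert_original_stable_diffusion_to_diffusers.py \
#     --checkpoint_path ./v1-5.ckpt --dump_path ./sd15 --half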
| 421
| 0
|
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self, vocab_file=None, merges_file=None, tokenizer_file=None, cls_token="<s>", unk_token="<unk>",
        pad_token="<pad>", mask_token="<mask>", sep_token="</s>", **kwargs,
    ) -> None:
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, cls_token=cls_token, unk_token=unk_token,
            pad_token=pad_token, mask_token=mask_token, sep_token=sep_token, **kwargs,
        )

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=already_has_special_tokens
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
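

# Special-token layout produced above (illustrative): a single sequence A becomes
# `<s> A </s>`; a pair (A, B) becomes `<s> A </s> B </s>`.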
| 710
|
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 318
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2,
            in_channels=4, out_channels=8, attention_bias=True, activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000, norm_type="ada_norm_zero", norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
| 572
|
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()

        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)

        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
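

# Usage sketch (illustrative): watermark a batch of generated images in [-1, 1]
# with shape (N, 3, H, W); images narrower than 256 px pass through unchanged.
#
#   watermarker = StableDiffusionXLWatermarker()
#   images = watermarker.apply_watermark(images)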
| 432
| 0
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]

    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")


def main():
    parser = tpu_command_parser()
    args = parser.parse_args()

    tpu_command_launcher(args)
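

# Example invocation (illustrative names):
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a --command "python train.py"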
| 719
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]
    def __init__(
        self, feature_size=64, sampling_rate=48_000, hop_length=480, max_length_s=10, fft_window_size=1024,
        padding_value=0.0, return_attention_mask=False, frequency_min: float = 0, frequency_max: float = 14_000,
        top_db: int = None, truncation: str = "fusion", padding: str = "repeatpad", **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min,
            max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min,
            max_frequency=frequency_max, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney",
        )
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: str = None, padding: Optional[str] = None, max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
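

# Usage sketch (illustrative): fused mel features for 10 s of 48 kHz audio.
#
#   fe = ClapFeatureExtractor()
#   audio = np.zeros(480_000)  # 10 s at 48 kHz
#   feats = fe(audio, sampling_rate=48_000, return_tensors="np")
#   feats["input_features"].shape  # roughly (1, 4, 1001, 64) with the defaults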
| 375
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"
class BarthezTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , lowerCamelCase__ , lowerCamelCase__="<s>" , lowerCamelCase__="</s>" , lowerCamelCase__="</s>" , lowerCamelCase__="<s>" , lowerCamelCase__="<unk>" , lowerCamelCase__="<pad>" , lowerCamelCase__="<mask>" , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
lowercase__ : int = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
lowercase__ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase__ , )
lowercase__ : int = vocab_file
lowercase__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase__ ) )
lowercase__ : List[Any] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
lowercase__ : Optional[Any] = len(self.sp_model ) - 1
lowercase__ : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__ : Tuple = [self.cls_token_id]
lowercase__ : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase__ )) + [1]
return [1] + ([0] * len(lowerCamelCase__ )) + [1, 1] + ([0] * len(lowerCamelCase__ )) + [1]
def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> List[int]:
lowercase__ : Any = [self.sep_token_id]
lowercase__ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCAmelCase__( self ) -> List[str]:
return len(self.sp_model )
def UpperCAmelCase__( self ) -> Union[str, Any]:
lowercase__ : Union[str, Any] = {self.convert_ids_to_tokens(lowerCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase__( self , lowerCamelCase__ ) -> List[str]:
return self.sp_model.encode(lowerCamelCase__ , out_type=lowerCamelCase__ )
def UpperCAmelCase__( self , lowerCamelCase__ ) -> int:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase__ : List[Any] = self.sp_model.PieceToId(lowerCamelCase__ )
return spm_id if spm_id else self.unk_token_id
def UpperCAmelCase__( self , lowerCamelCase__ ) -> Optional[Any]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(lowerCamelCase__ )
def UpperCAmelCase__( self , lowerCamelCase__ ) -> Optional[Any]:
lowercase__ : str = []
lowercase__ : str = """"""
lowercase__ : Any = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCamelCase__ ) + token
lowercase__ : Any = True
lowercase__ : Optional[Any] = []
else:
current_sub_tokens.append(lowerCamelCase__ )
lowercase__ : Any = False
out_string += self.sp_model.decode(lowerCamelCase__ )
return out_string.strip()
def __getstate__( self ) -> Any:
lowercase__ : List[str] = self.__dict__.copy()
lowercase__ : str = None
return state
def __setstate__( self , lowerCamelCase__ ) -> Optional[Any]:
lowercase__ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowercase__ : List[str] = {}
lowercase__ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase__ : Any = os.path.join(
lowerCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase__ , """wb""" ) as fi:
lowercase__ : List[Any] = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase__ )
return (out_vocab_file,)
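# --- usage sketch (not part of the original module) ----------------------------------
# A minimal, hedged example of the tokenizer above; the checkpoint name comes from
# PRETRAINED_VOCAB_FILES_MAP and downloading it requires network access. In practice,
# import the class from the installed `transformers` package.
#
#   from transformers import BarthezTokenizer
#
#   tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
#   ids = tokenizer("Bonjour le monde")["input_ids"]
#   print(tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(ids)))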
| 200
|
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."


def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the block of text in `filename` between `start_prompt` and `end_prompt`, with its location."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
ALLOWED_MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)


def camel_case_split(identifier):
    """Split a camel-cased `identifier` into words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def _center_text(text, width):
    """Center `text` in a table cell of `width` characters; emoji count for two columns."""
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """Generate an up-to-date model table from the content of the auto modules."""
    # Dictionary model names to config.
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    """Check the model table in `index.md` is consistent with the state of the lib and maybe fix it."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_table(args.fix_and_overwrite)
| 200
| 1
|
"""simple docstring"""
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """
    Capitalize the first letter of a sentence or word.

    >>> capitalize("hello world")
    'Hello world'
    >>> capitalize("")
    ''
    """
    if not sentence:
        return ""
    # Map every lowercase letter onto its uppercase counterpart.
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 703
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 494
| 0
|
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    """
    Minimum cost to travel on all given days using 1-day, 7-day and 30-day passes.

    >>> mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15])
    11
    >>> mincost_tickets([], [2, 7, 15])
    0
    """
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
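# --- migration sketch (not part of the original module) ------------------------------
# A hedged illustration of the shim above: both lines construct the same processor,
# but only the first emits the FutureWarning. In practice import these names from the
# installed `transformers` package.
#
#   feature_extractor = LayoutLMv2FeatureExtractor()   # deprecated spelling, warns
#   image_processor = LayoutLMv2ImageProcessor()       # preferred replacement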
| 134
| 0
|
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
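# Worked example (added for clarity): with dims == (2, 3), flat index 5 walks the
# dimensions right to left: 5 % 3 == 2 and 5 // 3 == 1, then 1 % 2 == 1, giving (1, 2),
# i.e. the last cell of a 2x3 grid, matching row-major (C-order) flattening.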
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices = []
    path_list = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
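# --- usage sketch (not part of the original module) ----------------------------------
# A minimal, hedged demonstration of `chunk_layer`: the leading `no_batch_dims` batch
# dimensions of every input are flattened, fed through `layer` in slices of
# `chunk_size`, and the outputs stitched back together. The toy layer and shapes are
# illustrative assumptions only.
#
#   def toy_layer(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
#       return x + y
#
#   x = torch.randn(4, 8, 16)
#   y = torch.randn(4, 8, 16)
#   out = chunk_layer(toy_layer, {"x": x, "y": y}, chunk_size=8, no_batch_dims=2)
#   assert out.shape == (4, 8, 16) and torch.allclose(out, x + y)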
class ChunkSizeTuner:
    def __init__(self, max_chunk_size: int = 512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent

    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int:
        consistent = True
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
| 513
|
'''simple docstring'''
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
__snake_case ="""scheduler_config.json"""
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Optional[int] = 1
lowerCamelCase : Optional[int] = 2
lowerCamelCase : Union[str, Any] = 3
lowerCamelCase : Optional[Any] = 4
lowerCamelCase : Optional[int] = 5
lowerCamelCase : Dict = 6
lowerCamelCase : List[Any] = 7
lowerCamelCase : Dict = 8
lowerCamelCase : str = 9
lowerCamelCase : Optional[Any] = 10
lowerCamelCase : Optional[Any] = 11
lowerCamelCase : int = 12
lowerCamelCase : List[Any] = 13
lowerCamelCase : List[str] = 14
@dataclass
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : torch.FloatTensor
class UpperCAmelCase_ :
lowerCamelCase : Optional[int] = SCHEDULER_CONFIG_NAME
lowerCamelCase : Optional[Any] = []
lowerCamelCase : Optional[int] = True
@classmethod
def __UpperCAmelCase ( cls : List[str] , UpperCAmelCase__ : Dict[str, Any] = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Tuple=False , **UpperCAmelCase__ : str , ) -> Any:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = cls.load_config(
pretrained_model_name_or_path=UpperCAmelCase__ , subfolder=UpperCAmelCase__ , return_unused_kwargs=UpperCAmelCase__ , return_commit_hash=UpperCAmelCase__ , **UpperCAmelCase__ , )
return cls.from_config(UpperCAmelCase__ , return_unused_kwargs=UpperCAmelCase__ , **UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase__ : Union[str, os.PathLike] , UpperCAmelCase__ : bool = False , **UpperCAmelCase__ : Optional[int] ) -> Optional[Any]:
self.save_config(save_directory=UpperCAmelCase__ , push_to_hub=UpperCAmelCase__ , **UpperCAmelCase__ )
@property
def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
return self._get_compatibles()
@classmethod
def __UpperCAmelCase ( cls : List[str] ) -> Dict:
lowerCAmelCase = list(set([cls.__name__] + cls._compatibles ) )
lowerCAmelCase = importlib.import_module(__name__.split('.' )[0] )
lowerCAmelCase = [
getattr(UpperCAmelCase__ , UpperCAmelCase__ ) for c in compatible_classes_str if hasattr(UpperCAmelCase__ , UpperCAmelCase__ )
]
return compatible_classes
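# --- usage sketch (not part of the original module) ----------------------------------
# A hedged illustration of the mixin's entry points, assuming the installed `diffusers`
# package and network access for the checkpoint download:
#
#   from diffusers import DDIMScheduler
#
#   scheduler = DDIMScheduler.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", subfolder="scheduler"
#   )
#   print(scheduler.compatibles)           # other schedulers sharing this config
#   scheduler.save_pretrained("./sched")   # writes scheduler_config.json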
| 513
| 1
|
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count string scraped from a Google Scholar lookup page."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 84
|
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
| 108
| 0
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element


def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    """Create a simple DataLoader to use during the test cases."""
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))
    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)
    return dl
def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    """Verify the batch sizes coming from a prepared dataloader in each process."""
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)
    batch_sizes = [len(batch[0]) for batch in dl]

    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes


def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()

    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1, 1],
    )

    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 2],
    )
def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1],
    )

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 1],
    )


def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)

    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)

    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)

    accelerator.wait_for_everyone()

    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]


def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)
def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches

    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches


def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError

    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches


def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)

    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)
def main():
    accelerator = create_accelerator()

    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()

    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()

    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()

    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()

    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()

    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()

    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state


if __name__ == "__main__":
    main()
| 517
|
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
_SCREAMING_SNAKE_CASE = False
class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_versatile_diffusion_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 517
| 1
|
def get_highest_set_bit_position(number: int) -> int:
    """
    Returns the 1-indexed position of the highest set bit of a non-negative integer.

    >>> get_highest_set_bit_position(25)
    5
    >>> get_highest_set_bit_position(1)
    1
    >>> get_highest_set_bit_position(0)
    0
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
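# Note (added for clarity): for non-negative integers this is exactly Python's built-in
# `int.bit_length`, e.g. (25).bit_length() == 5 == get_highest_set_bit_position(25).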
| 393
|
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    BitsAndBytesConfig,
    pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
    class LoRALayer(nn.Module):
        """Wraps a linear layer with a LoRA-like adapter - used for testing purposes only."""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config
        self.assertTrue(hasattr(config, "quantization_config"))
        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        quantization_config = BitsAndBytesConfig()
        quantization_config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=quantization_config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        quantization_config = BitsAndBytesConfig()

        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=quantization_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class Bnb4BitModelClassesTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4BitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)

        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)


class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
| 393
| 1
|
'''simple docstring'''
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
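# --- usage sketch (not part of the original script) ----------------------------------
# Hedged example invocation; the script filename and checkpoint path are placeholders:
#
#   python convert_xglm_original_ckpt_to_trfms.py /path/to/model.pt ./xglm-converted
#
# after which the folder can be loaded via XGLMForCausalLM.from_pretrained("./xglm-converted").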
| 712
|
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}."
        )
    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8
        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('''Launching training on one GPU.''')
else:
print('''Launching training on one CPU.''')
        function(*args)
else:
if num_processes is None:
raise ValueError(
'''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''')
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state) > 0:
raise ValueError(
'''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '''
'''inside your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''')
if torch.cuda.is_initialized():
raise ValueError(
'''To launch a multi-GPU training from your notebook, you need to avoid running any instruction '''
'''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '''
'''function.''')
            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.0.1", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '''
'''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '''
'''Please review your imports and test them when running the `notebook_launcher()` to identify '''
'''which one is problematic.''') from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
            os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
print('''Launching training on MPS.''')
elif torch.cuda.is_available():
print('''Launching training on one GPU.''')
else:
print('''Launching training on CPU.''')
        function(*args)
def debug_launcher(function, args=(), num_processes=2):
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=num_processes, master_addr="127.0.0.1", master_port="29500", accelerate_mixed_precision="no", accelerate_debug_rdv_file=tmp_file.name, accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
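# Minimal usage sketch for `notebook_launcher` (the training function is a
# stand-in for illustration, not part of the original module):
#
#   from accelerate import Accelerator
#
#   def training_function():
#       accelerator = Accelerator()
#       accelerator.print(f"Process {accelerator.process_index} of {accelerator.num_processes} started.")
#
#   notebook_launcher(training_function, args=(), num_processes=2)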
| 191
| 0
|
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits
    model_names = [
'''videomae-small-finetuned-kinetics''',
'''videomae-small-finetuned-ssv2''',
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
'''videomae-base-short''',
'''videomae-base-short-finetuned-kinetics''',
'''videomae-base''',
'''videomae-base-finetuned-kinetics''',
'''videomae-large''',
'''videomae-large-finetuned-kinetics''',
'''videomae-huge-finetuned-kinetics''',
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
'''videomae-base-short-ssv2''',
'''videomae-base-short-finetuned-ssv2''',
'''videomae-base-ssv2''',
'''videomae-base-finetuned-ssv2''',
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
snake_case__ : Union[str, Any] = torch.Size([1, 4_00] )
snake_case__ : str = torch.tensor([-0.9291, -0.4061, -0.9307] )
elif model_name == "videomae-small-finetuned-ssv2":
snake_case__ : Union[str, Any] = torch.Size([1, 1_74] )
snake_case__ : Dict = torch.tensor([0.2671, -0.4689, -0.8235] )
elif model_name == "videomae-base":
snake_case__ : Optional[int] = torch.Size([1, 14_08, 15_36] )
snake_case__ : List[str] = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
elif model_name == "videomae-base-short":
snake_case__ : Union[str, Any] = torch.Size([1, 14_08, 15_36] )
snake_case__ : List[str] = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
# we verified the loss both for normalized and unnormalized targets for this one
snake_case__ : List[Any] = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
elif model_name == "videomae-large":
snake_case__ : List[Any] = torch.Size([1, 14_08, 15_36] )
snake_case__ : List[str] = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
elif model_name == "videomae-large-finetuned-kinetics":
snake_case__ : int = torch.Size([1, 4_00] )
snake_case__ : Dict = torch.tensor([0.0771, 0.0011, -0.3625] )
elif model_name == "videomae-huge-finetuned-kinetics":
snake_case__ : Union[str, Any] = torch.Size([1, 4_00] )
snake_case__ : List[str] = torch.tensor([0.2433, 0.1632, -0.4894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
snake_case__ : str = torch.Size([1, 4_00] )
snake_case__ : Tuple = torch.tensor([0.6588, 0.0990, -0.2493] )
elif model_name == "videomae-base-finetuned-kinetics":
snake_case__ : Dict = torch.Size([1, 4_00] )
snake_case__ : List[str] = torch.tensor([0.3669, -0.0688, -0.2421] )
elif model_name == "videomae-base-short-ssv2":
snake_case__ : Tuple = torch.Size([1, 14_08, 15_36] )
snake_case__ : Union[str, Any] = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
snake_case__ : int = torch.Size([1, 1_74] )
snake_case__ : List[Any] = torch.tensor([-0.0537, -0.1539, -0.3266] )
elif model_name == "videomae-base-ssv2":
snake_case__ : Optional[Any] = torch.Size([1, 14_08, 15_36] )
snake_case__ : List[str] = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
elif model_name == "videomae-base-finetuned-ssv2":
snake_case__ : str = torch.Size([1, 1_74] )
snake_case__ : List[str] = torch.tensor([0.1961, -0.8337, -0.6389] )
else:
raise ValueError(F"""Model name not supported. Should be one of {model_names}""" )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , __lowerCAmelCase , atol=1e-4 )
else:
print('''Logits:''' , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , __lowerCAmelCase , atol=1e-4 )
print('''Logits ok!''' )
# verify loss, if applicable
if model_name == "videomae-base-short":
snake_case__ : List[str] = outputs.loss
assert torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1e-4 )
print('''Loss ok!''' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
type=str,
help=(
"""URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
""" download link."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
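# Usage sketch for the script above (the script name and Google Drive file id
# are placeholders, not real values):
#
#   python convert_videomae_to_pytorch.py \
#       --checkpoint_url "https://drive.google.com/uc?id=<file-id>" \
#       --pytorch_dump_folder_path ./videomae-base \
#       --model_name videomae-base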
| 347
|
'''simple docstring'''
def different_signs(num1: int, num2: int) -> bool:
    """
    Return True if the two integers have opposite signs, False otherwise.

    >>> different_signs(1, -1)
    True
    >>> different_signs(1, 1)
    False
    """
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 347
| 1
|
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_sequence_label_size=2, initializer_range=0.02, num_labels=2, num_choices=4, summary_type="last", use_proj=True, scope=None, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, num_labels=self.num_labels, bos_token_id=self.bos_token_id,
        )
    def create_and_check_xlm_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_xlm_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_xlm_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_xlm_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
    def create_and_check_xlm_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_xlm_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_xlm_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict["end_positions"] = torch.zeros(self.model_tester.batch_size, dtype=torch.long, device=torch_device)

        return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
            )
    def _check_hidden_states_for_generate(self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )
    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 709
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 275
| 0
|
def solution(limit: int = 1_000_000) -> int:
    """Sum of Euler's totient phi(n) for 2 <= n <= limit, computed with a sieve."""
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so adjust all of its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
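# Sanity-check sketch for the sieve above on a small limit, using a brute-force
# totient (my addition for illustration, not part of the original solution):
#
#   from math import gcd
#
#   def phi_naive(n):
#       return sum(1 for k in range(1, n + 1) if gcd(n, k) == 1)
#
#   assert solution(10) == sum(phi_naive(n) for n in range(2, 11))  # both give 31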
| 503
|
'''simple docstring'''
import numpy as np
def runge_kutta(f, y0, x0, h, x_end) -> np.ndarray:
    """Classic fourth-order Runge-Kutta method for y' = f(x, y) with y(x0) = y0."""
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
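# Quick usage sketch: integrate y' = y with y(0) = 1 up to x = 1 and compare
# the endpoint against the exact value e (names below are only illustrative):
#
#   y = runge_kutta(lambda x, y: y, y0=1.0, x0=0.0, h=0.01, x_end=1.0)
#   print(y[-1])  # ~2.718281..., matching np.e to many decimal places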
| 546
| 0
|
'''simple docstring'''
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    """Return the numerator of the largest fraction smaller than numerator/denominator with a denominator <= limit."""
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator

    return max_numerator
if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1_000_000))
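# For the small Project Euler example (denominators up to 8), the fraction
# immediately to the left of 3/7 is 2/5, so the function returns its numerator:
#
#   assert solution(numerator=3, denominator=7, limit=8) == 2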
| 687
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of VQModel's encoding method, holding the (pre-quantization) encoder latents."""

    latents: torch.FloatTensor
class VQModel(ModelMixin, ConfigMixin):
    """A VQ-VAE model built from an Encoder, a VectorQuantizer and a Decoder."""

    @register_to_config
    def __init__(self, in_channels: int = 3, out_channels: int = 3, down_block_types: Tuple[str] = ("DownEncoderBlock2D",), up_block_types: Tuple[str] = ("UpDecoderBlock2D",), block_out_channels: Tuple[int] = (64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 3, sample_size: int = 32, num_vq_embeddings: int = 256, norm_num_groups: int = 32, vq_embed_dim: Optional[int] = None, scaling_factor: float = 0.18215, norm_type: str = "group"):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type,
        )
@apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)
@apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
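# Minimal usage sketch: round-trip a random image batch through the default
# VQModel (shapes are illustrative):
#
#   import torch
#   model = VQModel()  # 3-channel in/out, single down/up block
#   images = torch.randn(1, 3, 32, 32)
#   with torch.no_grad():
#       reconstruction = model(images).sample
#   print(reconstruction.shape)  # torch.Size([1, 3, 32, 32])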
| 687
| 1
|
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def explicit_euler(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    """Forward (explicit) Euler method for the ODE y' = ode_func(x, y) with y(x0) = y0."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
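# Usage sketch: the same test problem as the Runge-Kutta snippet above,
# y' = y with y(0) = 1. Being first order, explicit Euler is noticeably less
# accurate at the same step size:
#
#   y = explicit_euler(lambda x, y: y, y0=1.0, x0=0.0, step_size=0.01, x_end=1.0)
#   print(y[-1])  # ~2.7048, versus the exact value e ~ 2.7183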
| 676
|
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict(flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False):
    """Load pytorch checkpoints in a flax model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")

        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")

        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix):
    """Rename PyTorch weight names to the corresponding Flax weight names and reshape tensors if necessary."""

    def is_key_or_prefix_key_in_dict(key) -> bool:
        """Checks whether `key` or `(model_prefix,) + key` is in random_flax_state_dict."""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    # convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    model_prefix = flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params["params"]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)

    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["batch_stats"])
        random_flax_state_dict.update(flax_batch_stats)

    flax_state_dict = {}

    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split("."))

        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
        )

        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key, None)
                continue

            # also add unexpected weight so that warning is thrown
            flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    import torch

    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        model_prefix = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["params"]

            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"]))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)

        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split("."))

            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]

            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
            )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                    )

            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None)
                    continue

                # also add unexpected weight so that warning is thrown
                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load flax checkpoints in a PyTorch model."""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")

    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
def a_ ( __snake_case : Optional[Any] , __snake_case : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
lowerCamelCase_ =flatten_dict(jax.tree_util.tree_map(lambda __snake_case : x.dtype == jnp.bfloataa , __snake_case ) ).values()
if any(__snake_case ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading those in PyTorch model.''' )
lowerCamelCase_ =jax.tree_util.tree_map(
lambda __snake_case : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __snake_case )
lowerCamelCase_ =flatten_dict(__snake_case )
lowerCamelCase_ =pt_model.state_dict()
lowerCamelCase_ =(pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
lowerCamelCase_ =(pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
lowerCamelCase_ =[]
lowerCamelCase_ =set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
lowerCamelCase_ =flax_key_tuple[0] == pt_model.base_model_prefix
lowerCamelCase_ ='''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCamelCase_ =flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
        # prepend the base-model prefix when loading a base checkpoint into a model with a head
        flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_var",)

        if "batch_stats" in flax_state:
            flax_key = ".".join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = ".".join(flax_key_tuple)

        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split(".")
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + "_g"
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + "_v"
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = ".".join(key_components)
                special_pt_names[key_to_check] = key

        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    else:
        logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n")

    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    else:
        logger.warning(
            f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
            "If your task is similar to the task the model of the checkpoint was trained on, "
            f"you can already use {pt_model.__class__.__name__} for predictions without further training."
        )

    return pt_model
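

# Hedged usage sketch (editor addition, not part of the original function): the
# snippet above is the tail of a Flax->PyTorch weight-loading helper in the style
# of `transformers.modeling_flax_pytorch_utils.load_flax_weights_in_pytorch_model`.
# Checkpoint and class names below are illustrative assumptions.
#
# from transformers import BertForSequenceClassification, FlaxBertForSequenceClassification
# from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
#
# flax_model = FlaxBertForSequenceClassification.from_pretrained("bert-base-uncased")
# pt_model = BertForSequenceClassification.from_pretrained("bert-base-uncased")
# pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)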
| 676
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
"funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
"funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}
class FunnelConfig(PretrainedConfig):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=30522,
        block_sizes=[4, 4, 4],
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
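

# Hedged usage sketch (editor addition): exercising the derived properties.
# Values are illustrative.
#
# config = FunnelConfig(block_sizes=[2, 2], n_head=8)
# assert config.num_blocks == 2           # len(block_sizes)
# assert config.num_hidden_layers == 4    # sum(block_sizes)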
| 721
|
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float,
) -> float:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
"B": [["C", 1]],
"C": [["D", 1]],
"D": [["F", 1]],
"E": [["B", 1], ["G", 2]],
"F": [],
"G": [["F", 1]],
}
graph_bwd = {
"B": [["E", 1]],
"C": [["B", 1]],
"D": [["C", 1]],
"F": [["D", 1], ["G", 1]],
"E": [[None, np.inf]],
"G": [["E", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
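
# Hedged usage sketch (editor addition), using the sample graphs above:
# bidirectional_dij("E", "F", graph_fwd, graph_bwd)  # expected 3 (E -> G -> F, costs 2 + 1)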
| 29
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_blenderbot_small': [
'BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotSmallConfig',
'BlenderbotSmallOnnxConfig',
],
'tokenization_blenderbot_small': ['BlenderbotSmallTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
'BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotSmallForCausalLM',
'BlenderbotSmallForConditionalGeneration',
'BlenderbotSmallModel',
'BlenderbotSmallPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
'TFBlenderbotSmallForConditionalGeneration',
'TFBlenderbotSmallModel',
'TFBlenderbotSmallPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
'FlaxBlenderbotSmallForConditionalGeneration',
'FlaxBlenderbotSmallModel',
'FlaxBlenderbotSmallPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
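
    # Hedged usage note (editor addition): with the lazy module installed in
    # sys.modules, symbols resolve on first attribute access, e.g.
    # from transformers.models.blenderbot_small import BlenderbotSmallConfig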
| 49
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name

    return name
def prepare_img():
    # We will verify the conversion on an image of cute cats
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the timm model's weights into our BiT structure."""
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="resnetv2_50x1_bitm",
type=str,
help="Name of the BiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model to the hub.",
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
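
# Hedged usage sketch (editor addition): a typical invocation of this script,
# assuming it is saved as convert_bit_to_pytorch.py.
#
#   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-dump --push_to_hub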
| 130
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}


class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
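

# Hedged usage sketch (editor addition): the derived `hidden_size` is the
# channel dimension after the last stage.
#
# config = Swinv2Config(embed_dim=96, depths=[2, 2, 6, 2])
# assert config.hidden_size == 96 * 2 ** 3  # 768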
| 708
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        for attribute in (
            "do_resize",
            "size",
            "do_center_crop",
            "center_crop",
            "do_normalize",
            "image_mean",
            "image_std",
            "do_convert_rgb",
        ):
            self.assertTrue(hasattr(image_processing, attribute))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def _check_call(self, image_inputs, expected_instance):
        # shared body for the PIL / numpy / torch call tests below
        image_processing = self.image_processing_class(**self.image_processor_dict)
        for image in image_inputs:
            self.assertIsInstance(image, expected_instance)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pil(self):
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        self._check_call(image_inputs, Image.Image)

    def test_call_numpy(self):
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        self._check_call(image_inputs, np.ndarray)

    def test_call_pytorch(self):
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        self._check_call(image_inputs, torch.Tensor)
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        for attribute in (
            "do_resize",
            "size",
            "do_center_crop",
            "center_crop",
            "do_normalize",
            "image_mean",
            "image_std",
            "do_convert_rgb",
        ):
            self.assertTrue(hasattr(image_processing, attribute))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images with an extra alpha channel
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (do_convert_rgb drops the alpha channel)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
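

# Hedged usage sketch (editor addition): running the image processor outside the
# test harness. The checkpoint name is an assumption.
#
# from transformers import ChineseCLIPImageProcessor
# processor = ChineseCLIPImageProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
# pixel_values = processor(images=pil_image, return_tensors="pt").pixel_values  # (1, 3, 224, 224)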
| 442
| 0
|
'''simple docstring'''
def match_pattern(input_string: str, pattern: str) -> bool:
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]

            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F"""{input_string} matches the given pattern {pattern}""")
else:
print(F"""{input_string} does not match with the given pattern {pattern}""")
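
# Hedged extra checks (editor addition) illustrating the matcher's semantics:
# assert match_pattern("aa", "a*") is True          # '*' repeats the preceding char
# assert match_pattern("ab", ".*") is True          # '.' matches any single char
# assert match_pattern("mississippi", "mis*is*p*.") is False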
| 75
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)


def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    # adding a tiny epsilon to the logits keeps the softmax numerically well-behaved
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)


def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs


def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF

    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)


def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]

    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )


def save_attributes_to_hdf5_group(group, name, data):
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data


def expand_1d(data):
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
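

# Hedged usage sketch (editor addition), exercising the helpers above:
# x = tf.zeros((2, 3, 4))
# shape_list(x)                 # -> [2, 3, 4] (static dims when known)
# flatten(x, start_dim=1)       # -> shape (2, 12), mirrors torch.flatten
# expand_1d(tf.zeros((5,)))     # -> shape (5, 1)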
| 342
| 0
|
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
    arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 101
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})

    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
| 101
| 1
|
'''simple docstring'''
def xnor_gate(input_1: int, input_2: int) -> int:
    """XNOR: the output is 1 only when both inputs are equal."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 452
|
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
| 452
| 1
|
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('audio-spectrogram-transformer', 'ASTFeatureExtractor'),
('beit', 'BeitFeatureExtractor'),
('chinese_clip', 'ChineseCLIPFeatureExtractor'),
('clap', 'ClapFeatureExtractor'),
('clip', 'CLIPFeatureExtractor'),
('clipseg', 'ViTFeatureExtractor'),
('conditional_detr', 'ConditionalDetrFeatureExtractor'),
('convnext', 'ConvNextFeatureExtractor'),
('cvt', 'ConvNextFeatureExtractor'),
('data2vec-audio', 'Wav2Vec2FeatureExtractor'),
('data2vec-vision', 'BeitFeatureExtractor'),
('deformable_detr', 'DeformableDetrFeatureExtractor'),
('deit', 'DeiTFeatureExtractor'),
('detr', 'DetrFeatureExtractor'),
('dinat', 'ViTFeatureExtractor'),
('donut-swin', 'DonutFeatureExtractor'),
('dpt', 'DPTFeatureExtractor'),
('encodec', 'EncodecFeatureExtractor'),
('flava', 'FlavaFeatureExtractor'),
('glpn', 'GLPNFeatureExtractor'),
('groupvit', 'CLIPFeatureExtractor'),
('hubert', 'Wav2Vec2FeatureExtractor'),
('imagegpt', 'ImageGPTFeatureExtractor'),
('layoutlmv2', 'LayoutLMv2FeatureExtractor'),
('layoutlmv3', 'LayoutLMv3FeatureExtractor'),
('levit', 'LevitFeatureExtractor'),
('maskformer', 'MaskFormerFeatureExtractor'),
('mctct', 'MCTCTFeatureExtractor'),
('mobilenet_v1', 'MobileNetV1FeatureExtractor'),
('mobilenet_v2', 'MobileNetV2FeatureExtractor'),
('mobilevit', 'MobileViTFeatureExtractor'),
('nat', 'ViTFeatureExtractor'),
('owlvit', 'OwlViTFeatureExtractor'),
('perceiver', 'PerceiverFeatureExtractor'),
('poolformer', 'PoolFormerFeatureExtractor'),
('regnet', 'ConvNextFeatureExtractor'),
('resnet', 'ConvNextFeatureExtractor'),
('segformer', 'SegformerFeatureExtractor'),
('sew', 'Wav2Vec2FeatureExtractor'),
('sew-d', 'Wav2Vec2FeatureExtractor'),
('speech_to_text', 'Speech2TextFeatureExtractor'),
('speecht5', 'SpeechT5FeatureExtractor'),
('swiftformer', 'ViTFeatureExtractor'),
('swin', 'ViTFeatureExtractor'),
('swinv2', 'ViTFeatureExtractor'),
('table-transformer', 'DetrFeatureExtractor'),
('timesformer', 'VideoMAEFeatureExtractor'),
('tvlt', 'TvltFeatureExtractor'),
('unispeech', 'Wav2Vec2FeatureExtractor'),
('unispeech-sat', 'Wav2Vec2FeatureExtractor'),
('van', 'ConvNextFeatureExtractor'),
('videomae', 'VideoMAEFeatureExtractor'),
('vilt', 'ViltFeatureExtractor'),
('vit', 'ViTFeatureExtractor'),
('vit_mae', 'ViTFeatureExtractor'),
('vit_msn', 'ViTFeatureExtractor'),
('wav2vec2', 'Wav2Vec2FeatureExtractor'),
('wav2vec2-conformer', 'Wav2Vec2FeatureExtractor'),
('wavlm', 'Wav2Vec2FeatureExtractor'),
('whisper', 'WhisperFeatureExtractor'),
('xclip', 'CLIPFeatureExtractor'),
('yolos', 'YolosFeatureExtractor'),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None


def get_feature_extractor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)


class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
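

# Hedged usage sketch (editor addition): the checkpoint name is an assumption.
#
# extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
# inputs = extractor(raw_speech, sampling_rate=16_000, return_tensors="pt")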
| 707
|
"""simple docstring"""
from __future__ import annotations
def comp_and_swap(array: list, index1: int, index2: int, direction: int) -> None:
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list, low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list, low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
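
# Editor note (hedged): the classic bitonic network assumes the input length is a
# power of two. Programmatic use, e.g.:
# data = [12, 42, -21, 17, 23, 18, 9, -5]
# bitonic_sort(data, 0, len(data), 1)  # sorts `data` in place, ascending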
| 302
| 0
|
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce TensorFlow log noise (assumed intent of the original assignment)
print('Python version:', sys.version)
print('OS platform:', platform.platform())
print('OS architecture:', platform.machine())
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
except ImportError:
print('Torch version:', None)
try:
import transformers
print('transformers version:', transformers.__version__)
except ImportError:
print('transformers version:', None)
| 647
|
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)


class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...


class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class lowerCamelCase__ ( lowerCamelCase__):
'''simple docstring'''
snake_case_ =[b"""\xFD\x37\x7A\x58\x5A\x00"""]
@staticmethod
def lowerCAmelCase__ (__lowerCamelCase ,__lowerCamelCase ) -> None:
"""simple docstring"""
with lzma.open(__lowerCamelCase ) as compressed_file:
with open(__lowerCamelCase ,'''wb''' ) as extracted_file:
shutil.copyfileobj(__lowerCamelCase ,__lowerCamelCase )
class lowerCamelCase__ ( lowerCamelCase__):
'''simple docstring'''
snake_case_ =[b"""Rar!\x1a\x07\x00""", b"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID
@staticmethod
def lowerCAmelCase__ (__lowerCamelCase ,__lowerCamelCase ) -> None:
"""simple docstring"""
if not config.RARFILE_AVAILABLE:
raise ImportError('''Please pip install rarfile''' )
import rarfile
os.makedirs(__lowerCamelCase ,exist_ok=__lowerCamelCase )
lowerCAmelCase__ : Dict = rarfile.RarFile(__lowerCamelCase )
rf.extractall(__lowerCamelCase )
rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path, output_path) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)
class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path, output_path) -> None:
        # a bz2 archive holds a single compressed stream: decompress it to output_path
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path, output_path) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)
class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path, output_path) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    extractors = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls) -> int:
        # the longest magic number across all registered extractors
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path, magic_number_length) -> bytes:
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path, return_extractor=False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path):  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path,
        output_path,
        extractor_format=None,
        extractor="deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor_format = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
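

# Usage sketch (illustrative, not part of the module): infer the archive format
# first, then dispatch to the matching extractor. The archive path and the
# output directory below are placeholders.
def _example_extract():  # hypothetical helper, safe to delete
    archive = "downloads/corpus.tar.gz"
    extractor_format = Extractor.infer_extractor_format(archive)  # e.g. "gzip"
    if extractor_format is not None:
        Extractor.extract(archive, "downloads/extracted/corpus", extractor_format=extractor_format)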
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    """
    Image classification pipeline using any `AutoModelForImageClassification`.
    Assigns labels to images.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
    "tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
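
# Effect of the lazy module, sketched (illustrative comment only): importing
# the package is cheap, and the heavy framework-specific submodules load on
# first attribute access.
#
#     import transformers.models.bert as bert
#     config = bert.BertConfig()      # triggers configuration_bert import only
#     model = bert.BertModel(config)  # now modeling_bert (and torch) are imported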
import numpy as np
import torch
import tqdm

from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler


class ValueGuidedRLPipeline(DiffusionPipeline):
    """
    Pipeline for value-guided sampling: a diffusion model denoises candidate
    state-action trajectories while a learned value function steers them.
    """

    def __init__(
        self,
        value_function: UNet1DModel,
        unet: UNet1DModel,
        scheduler: DDPMScheduler,
        env,
    ):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        # overwrite the conditioned timesteps with the fixed conditioning
        # values, leaving the action dimensions untouched
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)
            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
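

# Hypothetical wiring of the pipeline; the environment id and the checkpoint
# repos are placeholders and assume d4rl/gym plus pretrained models.
def _example_plan():  # illustrative only
    import gym

    env = gym.make("hopper-medium-v2")  # assumes d4rl registered the env
    value_function = UNet1DModel.from_pretrained("org/hopper-value")  # placeholder repo
    unet = UNet1DModel.from_pretrained("org/hopper-unet")  # placeholder repo
    planner = ValueGuidedRLPipeline(value_function, unet, DDPMScheduler(), env)
    obs = env.reset()
    return planner(obs, planning_horizon=32)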
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree):
    """Collect the shapes of all tensors in a nested dict/list/tuple tree."""
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx, dims):
    """Convert a flat index into a per-dimension index tuple for shape `dims`."""
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
@torch.jit.ignore
def _get_minimal_slice_set(start, end, dims, start_edges=None, end_edges=None):
    """
    Produce an ordered list of slice tuples that, applied to a tensor of shape
    `dims`, covers exactly the region between the inclusive coordinates
    `start` and `end`.
    """

    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t, flat_start, flat_end, no_batch_dims):
    """
    Equivalent to t.reshape((-1,) + t.shape[no_batch_dims:])[flat_start:flat_end],
    but built from views to avoid a memory-copying reshape.
    """
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    """
    Apply `layer` to slices of the flattened batch dimensions of `inputs` and
    stitch the results back together, trading compute for memory.
    """
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
class ChunkSizeTuner:
    def __init__(self, max_chunk_size: int = 512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        # binary-search the largest chunk size that does not OOM
        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
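

# Sketch: chunk a pointwise linear layer over a large flattened batch. The
# sizes are arbitrary; `no_batch_dims=2` treats the first two dims as batch.
def _example_chunking() -> torch.Tensor:  # illustrative only
    mlp = torch.nn.Linear(64, 64)
    inputs = {"x": torch.randn(16, 1000, 64)}
    out = chunk_layer(lambda x: mlp(x), inputs, chunk_size=256, no_batch_dims=2)
    assert out.shape == (16, 1000, 64)
    return out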
import datasets
from .evaluate import evaluate
_CITATION = """\
@inproceedings{Rajpurkar2016SQuAD10,
  title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
  author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
  booktitle={EMNLP},
  year={2016}
}
"""
_DESCRIPTION = """
This metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).

Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""
_KWARGS_DESCRIPTION = """
Computes SQuAD scores (F1 and EM).
Args:
    predictions: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair as given in the references (see below)
        - 'prediction_text': the text of the answer
    references: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair (see above),
        - 'answers': a Dict in the SQuAD dataset format
            {
                'text': list of possible texts for the answer, as a list of strings
                'answer_start': list of start positions for the answer, as a list of ints
            }
            Note that answer_start values are not taken into account to compute the metric.
Returns:
    'exact_match': Exact match (the normalized answer exactly match the gold answer)
    'f1': The F-score of predicted tokens versus the gold answer
Examples:

    >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
    >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
    >>> squad_metric = datasets.load_metric("squad")
    >>> results = squad_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 100.0, 'f1': 100.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Squad(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")},
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
            reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
        )

    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # keep the backend normalizer in sync with the requested options
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
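

# Usage sketch; the checkpoint id comes from the pretrained map above.
if __name__ == "__main__":
    tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
    encoding = tokenizer("Hello world", return_tensors="pt")
    print(encoding["input_ids"])  # [CLS] ... [SEP] ids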
# Author: OMKAR PATHAK, Nwachukwu Chidiebere

# Use a Python dictionary to construct the graph.
from __future__ import annotations

from pprint import pformat
from typing import Generic, TypeVar

T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True):
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
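

if __name__ == "__main__":
    # Quick demonstration of the directed/undirected behaviour.
    directed_graph = GraphAdjacencyList[int]()  # directed by default
    directed_graph.add_edge(0, 1).add_edge(1, 2)  # add_edge returns self, so calls chain
    print(directed_graph)  # {0: [1], 1: [2], 2: []}

    undirected_graph = GraphAdjacencyList[int](directed=False)
    undirected_graph.add_edge(0, 1)
    print(undirected_graph)  # {0: [1], 1: [0]}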
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("""0.8.3"""):
raise Exception("""requires gluonnlp == 0.8.3""")
if version.parse(mx.__version__) != version.parse("""1.5.0"""):
raise Exception("""requires mxnet == 1.5.0""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path, pytorch_dump_folder_path):
    """Convert the original Bort checkpoint (GluonNLP) into a PyTorch BertForMaskedLM checkpoint."""
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )
    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both model do output the same tensors")
    else:
        print("❌ Both model do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
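
# Hypothetical invocation (script name and paths are placeholders):
#   python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
#       --bort_checkpoint_path /path/to/bort.params \
#       --pytorch_dump_folder_path ./bort-pt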
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """
    Given any two of resistance, reactance and impedance (pass 0 for the
    unknown one), compute the third from the impedance triangle Z^2 = R^2 + X^2.
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
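
    # Worked example (illustrative): the 3-4-5 impedance triangle.
    print(electrical_impedance(3, 4, 0))  # {'impedance': 5.0}
    print(electrical_impedance(0, 4, 5))  # {'resistance': 3.0}
    print(electrical_impedance(3, 0, 5))  # {'reactance': 4.0}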
from __future__ import annotations
class IIRFilter:
    """N-order IIR filter, driven by `process` one sample at a time."""

    def __init__(self, order: int):
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        # shift the histories by one and store the newest sample/output
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
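

if __name__ == "__main__":
    # Sketch: run a 2nd-order filter on a unit step. The coefficients are
    # illustrative placeholders; real ones would come from a filter design.
    iir = IIRFilter(2)
    iir.set_coefficients([1.0, -1.14298, 0.41280], [0.06746, 0.13491, 0.06746])
    print([round(iir.process(1.0), 5) for _ in range(5)])  # step response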
import inspect
from typing import List, Optional, Union

import numpy as np
import PIL
import torch
import torch.utils.checkpoint

from ...models import UNet2DModel, VQModel
from ...schedulers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0


class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
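

# Usage sketch; the checkpoint id is an assumption and the input image path is
# a placeholder.
def _example_upscale():  # illustrative only
    low_res = PIL.Image.open("low_res.png").convert("RGB")
    pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
    upscaled = pipe(low_res, num_inference_steps=100).images[0]
    upscaled.save("upscaled.png")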
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """
    Helper function to read raw audio bytes (any ffmpeg-readable format) into
    a mono float32 numpy array resampled to `sampling_rate`.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """
    Helper function to stream raw bytes from the default microphone via
    ffmpeg, yielding chunks of `chunk_length_s` seconds.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate, chunk_length_s, stream_chunk_s=None, stride_length_s=None, format_for_conversion="f32le"
):
    """Stream microphone audio as overlapping numpy chunks with stride metadata."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
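# Usage sketch (illustrative): stream one-second windows from the microphone;
# by default each side overlaps the neighbouring window by 1/6 s. Each `item`
# carries float32 samples plus the stride so a consumer can drop the overlap.
#
#     for item in ffmpeg_microphone_live(sampling_rate=16_000, chunk_length_s=1.0):
#         audio, (left, right) = item["raw"], item["stride"]
#         ...  # feed `audio` to a model, trimming `left`/`right` samples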
def chunk_bytes_iter(iterator, chunk_len, stride, stream=False):
    """Re-chunk a raw byte iterator into windows of `chunk_len` with (left, right) stride overlap."""
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
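# Worked example (illustrative): with chunk_len=4 and stride=(1, 1), feeding
# b"abcdefgh" advances the window by chunk_len - 1 - 1 = 2 bytes per step and
# yields b"abcd" (0, 1), b"cdef" (1, 1), b"efgh" (1, 1), then the tail b"gh"
# (1, 0); the stride tuple tells the consumer how many boundary bytes are
# duplicated from the neighbouring chunks.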
def _ffmpeg_stream(ffmpeg_command, buflen):
    """Internal helper: yield `buflen`-sized byte blocks read from an ffmpeg subprocess."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
| 715
|
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    """Configuration class for the Informer time-series transformer."""

    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        # Informer arguments
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
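# Example (illustrative values, not from the source): a monthly-frequency setup
# with a 24-step horizon and one time feature.
#
#     config = InformerConfig(prediction_length=24, context_length=48, num_time_features=1)
#     # feature_size = input_size * len(lags_sequence) + _number_of_features
#     print(config.feature_size)  # prints 10 with these settings and the defaults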
| 81
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}
class OpenLlamaConfig(PretrainedConfig):
    """Configuration class for Open-Llama models."""

    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100_000,
        hidden_size=4_096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2_048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # the misspelled "use_memorry_efficient_attention" kwarg is accepted for
        # backward compatibility with configs that used the old name
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` dictionary."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
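# Example (illustrative): a config that linearly stretches RoPE positions 2x.
# The dict must contain exactly the `type` and `factor` fields checked above.
#
#     config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})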
| 412
|
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 412
| 1
|
"""simple docstring"""
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate per-process waiting time under shortest-remaining-time-first scheduling."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999_999_999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999_999_999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turn-around time is burst time plus waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the average waiting and turn-around times."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)


if __name__ == "__main__":
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
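# Worked example (computed by hand, not part of the original script): for
# arrival_time = [0, 1, 2] and burst_time = [4, 2, 3], P2 preempts P1 at t=1
# and finishes first, giving waiting times [2, 0, 4] and turn-around times
# [6, 2, 7] -- i.e. an average waiting time of 2.0 and an average turn around
# time of 5.0.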
| 491
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    """Builds image-processor kwargs and random test inputs for the tests below."""
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor."""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Create random test images as PIL images, numpy arrays, or torch tensors."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))
        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]
        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for the default three-channel configuration."""

    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)
@property
def __lowerCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase_ , 'do_resize' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'size' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'do_center_crop' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'center_crop' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'do_normalize' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'image_mean' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'image_std' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'do_convert_rgb' ) )
def __lowerCamelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
_lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 224, 'width': 224} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
_lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def __lowerCamelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
pass
def __lowerCamelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase = image_processing(UpperCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def __lowerCamelCase ( self : List[str] ) -> int:
"""simple docstring"""
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase = image_processing(UpperCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def __lowerCamelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase = image_processing(UpperCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests four-channel inputs that `do_convert_rgb` reduces back to three channels."""

    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3
@property
def __lowerCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase_ , 'do_resize' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'size' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'do_center_crop' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'center_crop' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'do_normalize' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'image_mean' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'image_std' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'do_convert_rgb' ) )
def __lowerCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
pass
def __lowerCamelCase ( self : int ) -> List[Any]:
"""simple docstring"""
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase = image_processing(UpperCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 491
| 1
|
'''simple docstring'''
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path: str):
    """Load a T5X checkpoint and flatten its parameter tree."""
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    """Rename flattened T5X keys to Hugging Face naming and convert arrays to torch tensors."""
    converted_dict = {}
    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    """Convert a T5X Pix2Struct checkpoint into a Hugging Face model and processor."""
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Whether the checkpoint is a VQA model.")
    args = parser.parse_args()

    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
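# Example invocation (paths are placeholders, not from the source):
#
#     python convert_pix2struct_original_pytorch_to_hf.py \
#         --t5x_checkpoint_path /path/to/t5x_checkpoint \
#         --pytorch_dump_folder_path ./pix2struct-base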
| 78
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
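# Note on the lazy-import pattern above (standard transformers behaviour):
# importing the package only registers `_import_structure`; the heavy
# torch/tf/flax modules load on first attribute access, e.g.
#
#     from transformers import DistilBertConfig  # triggers the real import lazily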
| 606
| 0
|
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    """Builds a small Llama configuration and random inputs for the tests below."""
def __init__( self , a__ , a__=13 , a__=7 , a__=True , a__=True , a__=False , a__=True , a__=99 , a__=32 , a__=5 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=5_12 , a__=16 , a__=2 , a__=0.02 , a__=3 , a__=4 , a__=None , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = seq_length
_lowerCamelCase = is_training
_lowerCamelCase = use_input_mask
_lowerCamelCase = use_token_type_ids
_lowerCamelCase = use_labels
_lowerCamelCase = vocab_size
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = type_vocab_size
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = num_labels
_lowerCamelCase = num_choices
_lowerCamelCase = scope
def _UpperCAmelCase ( self ):
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase = None
if self.use_input_mask:
_lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase = None
if self.use_token_type_ids:
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCamelCase = None
_lowerCamelCase = None
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
_lowerCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self ):
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a__ , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCamelCase = LlamaModel(config=a__ )
model.to(a__ )
model.eval()
_lowerCamelCase = model(a__ , attention_mask=a__ )
_lowerCamelCase = model(a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
_lowerCamelCase = True
_lowerCamelCase = LlamaModel(a__ )
model.to(a__ )
model.eval()
_lowerCamelCase = model(
a__ , attention_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , )
_lowerCamelCase = model(
a__ , attention_mask=a__ , encoder_hidden_states=a__ , )
_lowerCamelCase = model(a__ , attention_mask=a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
_lowerCamelCase = LlamaForCausalLM(config=a__ )
model.to(a__ )
model.eval()
_lowerCamelCase = model(a__ , attention_mask=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
_lowerCamelCase = True
_lowerCamelCase = True
_lowerCamelCase = LlamaForCausalLM(config=a__ )
model.to(a__ )
model.eval()
# first forward pass
_lowerCamelCase = model(
a__ , attention_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , use_cache=a__ , )
_lowerCamelCase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_lowerCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowerCamelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCamelCase = torch.cat([input_mask, next_mask] , dim=-1 )
_lowerCamelCase = model(
a__ , attention_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , output_hidden_states=a__ , )['''hidden_states'''][0]
_lowerCamelCase = model(
a__ , attention_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , past_key_values=a__ , output_hidden_states=a__ , )['''hidden_states'''][0]
# select random slice
_lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
_lowerCamelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a__ , a__ , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for Llama."""

    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)
def _UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def _UpperCAmelCase ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCamelCase = type
self.model_tester.create_and_check_model(*a__ )
def _UpperCAmelCase ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = 3
_lowerCamelCase = input_dict['''input_ids''']
_lowerCamelCase = input_ids.ne(1 ).to(a__ )
_lowerCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_lowerCamelCase = LlamaForSequenceClassification(a__ )
model.to(a__ )
model.eval()
_lowerCamelCase = model(a__ , attention_mask=a__ , labels=a__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _UpperCAmelCase ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = 3
_lowerCamelCase = '''single_label_classification'''
_lowerCamelCase = input_dict['''input_ids''']
_lowerCamelCase = input_ids.ne(1 ).to(a__ )
_lowerCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_lowerCamelCase = LlamaForSequenceClassification(a__ )
model.to(a__ )
model.eval()
_lowerCamelCase = model(a__ , attention_mask=a__ , labels=a__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _UpperCAmelCase ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = 3
_lowerCamelCase = '''multi_label_classification'''
_lowerCamelCase = input_dict['''input_ids''']
_lowerCamelCase = input_ids.ne(1 ).to(a__ )
_lowerCamelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_lowerCamelCase = LlamaForSequenceClassification(a__ )
model.to(a__ )
model.eval()
_lowerCamelCase = model(a__ , attention_mask=a__ , labels=a__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' )
def _UpperCAmelCase ( self ):
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def _UpperCAmelCase ( self , a__ ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = ids_tensor([1, 10] , config.vocab_size )
_lowerCamelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_lowerCamelCase = LlamaModel(a__ )
original_model.to(a__ )
original_model.eval()
_lowerCamelCase = original_model(a__ ).last_hidden_state
_lowerCamelCase = original_model(a__ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_lowerCamelCase = {'''type''': scaling_type, '''factor''': 10.0}
_lowerCamelCase = LlamaModel(a__ )
scaled_model.to(a__ )
scaled_model.eval()
_lowerCamelCase = scaled_model(a__ ).last_hidden_state
_lowerCamelCase = scaled_model(a__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(a__ , a__ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(a__ , a__ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(a__ , a__ , atol=1E-5 ) )
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    """Slow integration tests against released Llama-2 checkpoints."""
@unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' )
@slow
def _UpperCAmelCase ( self ):
_lowerCamelCase = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
_lowerCamelCase = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' , device_map='''auto''' )
_lowerCamelCase = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
_lowerCamelCase = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) , a__ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_lowerCamelCase = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a__ , atol=1E-5 , rtol=1E-5 )
@unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' )
@slow
def _UpperCAmelCase ( self ):
_lowerCamelCase = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
_lowerCamelCase = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' , device_map='''auto''' )
_lowerCamelCase = model(torch.tensor(a__ ) )
# Expected mean on dim = -1
_lowerCamelCase = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) , a__ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_lowerCamelCase = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a__ , atol=1E-5 , rtol=1E-5 )
@unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' )
@slow
def _UpperCAmelCase ( self ):
_lowerCamelCase = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
_lowerCamelCase = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' , device_map='''auto''' )
_lowerCamelCase = model(torch.tensor(a__ ) )
# Expected mean on dim = -1
_lowerCamelCase = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) , a__ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_lowerCamelCase = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , a__ , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
'''Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test''' )
@slow
def _UpperCAmelCase ( self ):
_lowerCamelCase = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
_lowerCamelCase = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' , device_map='''auto''' )
_lowerCamelCase = model(torch.tensor(a__ ) )
_lowerCamelCase = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , a__ , atol=1E-2 , rtol=1E-2 )
# fmt: off
_lowerCamelCase = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a__ , atol=1E-5 , rtol=1E-5 )
@unittest.skip('''Model is curently gated''' )
@slow
def _UpperCAmelCase ( self ):
_lowerCamelCase = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'''
_lowerCamelCase = '''Simply put, the theory of relativity states that '''
_lowerCamelCase = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' )
_lowerCamelCase = tokenizer.encode(a__ , return_tensors='''pt''' )
_lowerCamelCase = LlamaForCausalLM.from_pretrained(
'''meta-llama/Llama-2-13b-chat-hf''' , device_map='''sequential''' , use_safetensors=a__ )
# greedy generation outputs
_lowerCamelCase = model.generate(a__ , max_new_tokens=64 , top_p=a__ , temperature=1 , do_sample=a__ )
_lowerCamelCase = tokenizer.decode(generated_ids[0] , skip_special_tokens=a__ )
self.assertEqual(a__ , a__ )
| 716
|
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1):
    """Simulate a quantum full adder; each entry may be 0, 1, or 2 (2 = Hadamard superposition)."""
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")
    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr, cr)
    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits
    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(F'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
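# Sanity check (follows from the circuit above): with the deterministic inputs
# (1, 1, 1), the sum and carry qubits both measure 1, so all 1000 shots land
# on '11' -- i.e. 1 + 1 + 1 = 0b11.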
| 297
| 0
|
'''simple docstring'''
from manim import *
class ModelLoadingScene(Scene):  # illustrative class name; the original is not recoverable
    def construct(self):  # manim renders a Scene by calling its `construct` method
__snake_case = Rectangle(height=0.5 , width=0.5 )
__snake_case = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__snake_case = [mem.copy() for i in range(6 )]
__snake_case = [mem.copy() for i in range(6 )]
__snake_case = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
__snake_case = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
__snake_case = VGroup(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
__snake_case = Text('CPU' , font_size=2_4 )
__snake_case = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowerCAmelCase )
__snake_case = [mem.copy() for i in range(1 )]
__snake_case = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
__snake_case = Text('GPU' , font_size=2_4 )
__snake_case = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
gpu.align_to(__lowerCAmelCase , __lowerCAmelCase )
gpu.set_x(gpu.get_x() - 1 )
self.add(__lowerCAmelCase )
__snake_case = [mem.copy() for i in range(6 )]
__snake_case = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
__snake_case = Text('Model' , font_size=2_4 )
__snake_case = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
model.move_to([3, -1.0, 0] )
self.play(
Create(__lowerCAmelCase , run_time=1 ) , Create(__lowerCAmelCase , run_time=1 ) , Create(__lowerCAmelCase , run_time=1 ) , )
__snake_case = MarkupText(
F'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' , font_size=2_4 , )
__snake_case = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__snake_case = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowerCAmelCase , run_time=2.5 ) , Write(__lowerCAmelCase ) , Write(__lowerCAmelCase ) )
self.add(__lowerCAmelCase )
__snake_case = []
__snake_case = []
__snake_case = []
for i, rect in enumerate(__lowerCAmelCase ):
__snake_case = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(__lowerCAmelCase , opacity=0.7 )
cpu_target.move_to(__lowerCAmelCase )
cpu_target.generate_target()
__snake_case = 0.46 / 4
__snake_case = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowerCAmelCase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=__lowerCAmelCase , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=__lowerCAmelCase , buff=0.0 )
cpu_targs.append(__lowerCAmelCase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(__lowerCAmelCase ) )
second_animations.append(MoveToTarget(__lowerCAmelCase , run_time=1.5 ) )
self.play(*__lowerCAmelCase )
self.play(*__lowerCAmelCase )
self.wait()
| 356
|
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class a_ ( unittest.TestCase ):
def lowercase__ ( self : Optional[Any] ):
__snake_case = 'ylacombe/bark-small'
__snake_case = tempfile.mkdtemp()
__snake_case = 'en_speaker_1'
__snake_case = 'This is a test string'
__snake_case = 'speaker_embeddings_path.json'
__snake_case = 'speaker_embeddings'
def lowercase__ ( self : int , **__lowerCAmelCase : str ):
return AutoTokenizer.from_pretrained(self.checkpoint , **__lowerCAmelCase )
def lowercase__ ( self : List[Any] ):
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self : Dict ):
__snake_case = self.get_tokenizer()
__snake_case = BarkProcessor(tokenizer=__lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
__snake_case = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def lowercase__ ( self : int ):
__snake_case = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
__snake_case = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__snake_case = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def lowercase__ ( self : str ):
__snake_case = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
__snake_case = 3_5
__snake_case = 2
__snake_case = 8
__snake_case = {
'semantic_prompt': np.ones(__lowerCAmelCase ),
'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ),
'fine_prompt': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
__snake_case = processor(text=self.input_string , voice_preset=__lowerCAmelCase )
__snake_case = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__lowerCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from npz file
__snake_case = os.path.join(self.tmpdirname , 'file.npz' )
np.savez(__lowerCAmelCase , **__lowerCAmelCase )
__snake_case = processor(text=self.input_string , voice_preset=__lowerCAmelCase )
__snake_case = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__lowerCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from the hub
__snake_case = processor(text=self.input_string , voice_preset=self.voice_preset )
def lowercase__ ( self : int ):
__snake_case = self.get_tokenizer()
__snake_case = BarkProcessor(tokenizer=__lowerCAmelCase )
__snake_case = processor(text=self.input_string )
__snake_case = tokenizer(
self.input_string , padding='max_length' , max_length=2_5_6 , add_special_tokens=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
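# A brief usage sketch of the processor exercised above; calling it requires
# network access to download the "ylacombe/bark-small" checkpoint (the same
# checkpoint and voice preset the tests use).
def _example_bark_processor_usage():
    processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
    inputs = processor(text="This is a test string", voice_preset="en_speaker_1")
    return sorted(inputs.keys())  # includes "input_ids" and "history_prompt"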
| 356
| 1
|
'''simple docstring'''
def snake_case__ ( _A: List[str] , _A: Tuple , _A: int , _A: Dict , _A: Tuple , _A: Optional[Any] ) -> Any:
'''simple docstring'''
if index == r:
for j in range(_A ):
print(data[j] , end=""" """ )
print(""" """ )
return
# When no more elements remain to put in data[]
if i >= n:
return
# current is included, put next at next location
lowerCAmelCase = arr[i]
combination_util(_A , _A , _A , index + 1 , _A , i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(_A , _A , _A , _A , _A , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def snake_case__ ( _A: str , _A: Tuple , _A: Optional[int] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase = [0] * r
# Print all combination using temporary array 'data[]'
combination_util(_A , _A , _A , 0 , _A , 0 )
if __name__ == "__main__":
# Driver code to check the function above
__lowercase = [1_0, 2_0, 3_0, 4_0, 5_0]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
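# Cross-check sketch: for distinct elements, the recursive routine above emits
# the same r-sized subsets as the standard library's itertools.combinations.
from itertools import combinations

_expected = [list(c) for c in combinations([1_0, 2_0, 3_0, 4_0, 5_0], 3)]
print(len(_expected), "subsets, first:", _expected[0])  # 10 subsets, first: [10, 20, 30]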
| 605
|
'''simple docstring'''
from __future__ import annotations
import time
__lowercase = list[tuple[int, int]]
__lowercase = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__lowercase = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class a__:
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = pos_x
lowerCAmelCase = pos_y
lowerCAmelCase = (pos_y, pos_x)
lowerCAmelCase = goal_x
lowerCAmelCase = goal_y
lowerCAmelCase = parent
class a__:
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = Node(start[1] , start[0] , goal[1] , goal[0] , __lowerCAmelCase)
lowerCAmelCase = Node(goal[1] , goal[0] , goal[1] , goal[0] , __lowerCAmelCase)
lowerCAmelCase = [self.start]
lowerCAmelCase = False
def a_ ( self):
"""simple docstring"""
while self.node_queue:
lowerCAmelCase = self.node_queue.pop(0)
if current_node.pos == self.target.pos:
lowerCAmelCase = True
return self.retrace_path(__lowerCAmelCase)
lowerCAmelCase = self.get_successors(__lowerCAmelCase)
for node in successors:
self.node_queue.append(__lowerCAmelCase)
if not self.reached:
return [self.start.pos]
return None
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = []
for action in delta:
lowerCAmelCase = parent.pos_x + action[1]
lowerCAmelCase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(__lowerCAmelCase) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(__lowerCAmelCase , __lowerCAmelCase , self.target.pos_y , self.target.pos_x , __lowerCAmelCase))
return successors
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = node
lowerCAmelCase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x))
lowerCAmelCase = current_node.parent
path.reverse()
return path
class a__:
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = BreadthFirstSearch(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = BreadthFirstSearch(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = False
def a_ ( self):
"""simple docstring"""
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
lowerCAmelCase = self.fwd_bfs.node_queue.pop(0)
lowerCAmelCase = self.bwd_bfs.node_queue.pop(0)
if current_bwd_node.pos == current_fwd_node.pos:
lowerCAmelCase = True
return self.retrace_bidirectional_path(
__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = current_bwd_node
lowerCAmelCase = current_fwd_node
lowerCAmelCase = {
self.fwd_bfs: self.fwd_bfs.get_successors(__lowerCAmelCase),
self.bwd_bfs: self.bwd_bfs.get_successors(__lowerCAmelCase),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(__lowerCAmelCase)
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.fwd_bfs.retrace_path(__lowerCAmelCase)
lowerCAmelCase = self.bwd_bfs.retrace_path(__lowerCAmelCase)
bwd_path.pop()
bwd_path.reverse()
lowerCAmelCase = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
__lowercase = (0, 0)
__lowercase = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__lowercase = time.time()
__lowercase = BreadthFirstSearch(init, goal)
__lowercase = bfs.search()
__lowercase = time.time() - start_bfs_time
print('''Unidirectional BFS computation time : ''', bfs_time)
__lowercase = time.time()
__lowercase = BidirectionalBreadthFirstSearch(init, goal)
__lowercase = bd_bfs.search()
__lowercase = time.time() - start_bd_bfs_time
print('''Bidirectional BFS computation time : ''', bd_bfs_time)
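# Compact standalone sketch of the same idea: plain BFS over a 0/1 grid with a
# parent map, then path reconstruction by walking parents back from the goal.
from collections import deque


def tiny_bfs(grid, start, goal):
    parents = {start: None}
    q = deque([start])
    while q:
        y, x = q.popleft()
        if (y, x) == goal:  # rebuild the path by following parent links
            path = []
            while (y, x) != start:
                path.append((y, x))
                y, x = parents[(y, x)]
            return [start] + path[::-1]
        for dy, dx in ((-1, 0), (0, -1), (1, 0), (0, 1)):
            ny, nx = y + dy, x + dx
            if 0 <= ny < len(grid) and 0 <= nx < len(grid[0]) and grid[ny][nx] == 0 and (ny, nx) not in parents:
                parents[(ny, nx)] = (y, x)
                q.append((ny, nx))
    return None


print(tiny_bfs([[0, 0], [1, 0]], (0, 0), (1, 1)))  # [(0, 0), (0, 1), (1, 1)]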
| 605
| 1
|
import requests
def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : str ):
'''simple docstring'''
lowerCamelCase_ = {'Content-Type': 'application/json'}
lowerCamelCase_ = requests.post(lowercase , json={'text': message_body} , headers=lowercase )
if response.status_code != 2_00:
lowerCamelCase_ = (
'Request to slack returned an error '
f"""{response.status_code}, the response is:\n{response.text}"""
)
raise ValueError(lowercase )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
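# Hedged variant (not part of the original script): the same POST with an
# explicit timeout so a hung webhook cannot block the caller; `timeout` and
# `raise_for_status` are standard `requests` API.
def send_slack_message_with_timeout(message_body: str, slack_url: str, timeout: float = 10.0) -> None:
    response = requests.post(slack_url, json={"text": message_body}, headers={"Content-Type": "application/json"}, timeout=timeout)
    response.raise_for_status()  # raises requests.HTTPError on non-2xx replies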
| 70
|
"""simple docstring"""
from __future__ import annotations
import queue
class lowercase__ :
def __init__( self , SCREAMING_SNAKE_CASE) -> int:
_lowerCamelCase : int = data
_lowerCamelCase : List[str] = None
_lowerCamelCase : Any = None
def _snake_case ( ):
"""simple docstring"""
print("""\n********Press N to stop entering at any point of time********\n""" )
_lowerCamelCase : Optional[int] = input("""Enter the value of the root node: """ ).strip().lower()
_lowerCamelCase : queue.Queue = queue.Queue()
_lowerCamelCase : Optional[int] = TreeNode(int(__snake_case ) )
q.put(__snake_case )
while not q.empty():
_lowerCamelCase : Tuple = q.get()
_lowerCamelCase : Any = F'Enter the left node of {node_found.data}: '
_lowerCamelCase : Union[str, Any] = input(__snake_case ).strip().lower() or """n"""
if check == "n":
return tree_node
_lowerCamelCase : Dict = TreeNode(int(__snake_case ) )
_lowerCamelCase : List[str] = left_node
q.put(__snake_case )
_lowerCamelCase : Optional[int] = F'Enter the right node of {node_found.data}: '
_lowerCamelCase : Optional[Any] = input(__snake_case ).strip().lower() or """n"""
if check == "n":
return tree_node
_lowerCamelCase : List[Any] = TreeNode(int(__snake_case ) )
_lowerCamelCase : List[Any] = right_node
q.put(__snake_case )
raise
def _snake_case ( __snake_case : TreeNode ):
"""simple docstring"""
if not isinstance(__snake_case , __snake_case ) or not node:
return
print(node.data , end=""",""" )
pre_order(node.left )
pre_order(node.right )
def _snake_case ( __snake_case : TreeNode ):
"""simple docstring"""
if not isinstance(__snake_case , __snake_case ) or not node:
return
in_order(node.left )
print(node.data , end=""",""" )
in_order(node.right )
def _snake_case ( __snake_case : TreeNode ):
"""simple docstring"""
if not isinstance(__snake_case , __snake_case ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=""",""" )
def _snake_case ( __snake_case : TreeNode ):
"""simple docstring"""
if not isinstance(__snake_case , __snake_case ) or not node:
return
_lowerCamelCase : queue.Queue = queue.Queue()
q.put(__snake_case )
while not q.empty():
_lowerCamelCase : Any = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def _snake_case ( __snake_case : TreeNode ):
"""simple docstring"""
if not isinstance(__snake_case , __snake_case ) or not node:
return
_lowerCamelCase : queue.Queue = queue.Queue()
q.put(__snake_case )
while not q.empty():
_lowerCamelCase : Optional[Any] = []
while not q.empty():
_lowerCamelCase : Dict = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(__snake_case )
def _snake_case ( __snake_case : TreeNode ):
"""simple docstring"""
if not isinstance(__snake_case , __snake_case ) or not node:
return
_lowerCamelCase : list[TreeNode] = []
_lowerCamelCase : Optional[int] = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=""",""" )
stack.append(__snake_case )
_lowerCamelCase : Tuple = n.left
# end of while means current node doesn't have left child
_lowerCamelCase : Optional[Any] = stack.pop()
# start to traverse its right child
_lowerCamelCase : Dict = n.right
def _snake_case ( __snake_case : TreeNode ):
"""simple docstring"""
if not isinstance(__snake_case , __snake_case ) or not node:
return
_lowerCamelCase : list[TreeNode] = []
_lowerCamelCase : int = node
while n or stack:
while n:
stack.append(__snake_case )
_lowerCamelCase : Any = n.left
_lowerCamelCase : Optional[Any] = stack.pop()
print(n.data , end=""",""" )
_lowerCamelCase : List[Any] = n.right
def _snake_case ( __snake_case : TreeNode ):
"""simple docstring"""
if not isinstance(__snake_case , __snake_case ) or not node:
return
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = [], []
_lowerCamelCase : Optional[Any] = node
stack1.append(__snake_case )
while stack1: # to find the reversed order of post order, store it in stack2
_lowerCamelCase : Union[str, Any] = stack1.pop()
if n.left:
stack1.append(n.left )
if n.right:
stack1.append(n.right )
stack2.append(__snake_case )
while stack2: # pop up from stack2 will be the post order
print(stack2.pop().data , end=""",""" )
def _snake_case ( __snake_case : str = "" , __snake_case : Any=50 , __snake_case : List[str]="*" ):
"""simple docstring"""
if not s:
return "\n" + width * char
_lowerCamelCase , _lowerCamelCase : Optional[int] = divmod(width - len(__snake_case ) - 2 , 2 )
return F'{left * char} {s} {(left + extra) * char}'
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("""Binary Tree Traversals"""))
UpperCAmelCase = build_tree()
print(prompt("""Pre Order Traversal"""))
pre_order(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal"""))
in_order(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal"""))
post_order(node)
print(prompt() + """\n""")
print(prompt("""Level Order Traversal"""))
level_order(node)
print(prompt() + """\n""")
print(prompt("""Actual Level Order Traversal"""))
level_order_actual(node)
print("""*""" * 50 + """\n""")
print(prompt("""Pre Order Traversal - Iteration Version"""))
pre_order_iter(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal - Iteration Version"""))
in_order_iter(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal - Iteration Version"""))
post_order_iter(node)
print(prompt())
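# Non-interactive sketch: build_tree() above reads from stdin, but the traversals
# also accept a tree assembled in code (assuming the node class is the TreeNode
# referenced throughout this file). A three-node tree 2 <- 1 -> 3:
_root = TreeNode(1)
_root.left, _root.right = TreeNode(2), TreeNode(3)
pre_order(_root)   # 1,2,3,
print()
in_order(_root)    # 2,1,3,
print()
post_order(_root)  # 2,3,1,
print()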
| 88
| 0
|
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
}
_a = {
"vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
"merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}
_a = {
"ctrl": 256,
}
_a = {
"Pregnancy": 168_629,
"Christianity": 7_675,
"Explain": 106_423,
"Fitness": 63_440,
"Saving": 63_163,
"Ask": 27_171,
"Ass": 95_985,
"Joke": 163_509,
"Questions": 45_622,
"Thoughts": 49_605,
"Retail": 52_342,
"Feminism": 164_338,
"Writing": 11_992,
"Atheism": 192_263,
"Netflix": 48_616,
"Computing": 39_639,
"Opinion": 43_213,
"Alone": 44_967,
"Funny": 58_917,
"Gaming": 40_358,
"Human": 4_088,
"India": 1_331,
"Joker": 77_138,
"Diet": 36_206,
"Legal": 11_859,
"Norman": 4_939,
"Tip": 72_689,
"Weight": 52_343,
"Movies": 46_273,
"Running": 23_425,
"Science": 2_090,
"Horror": 37_793,
"Confession": 60_572,
"Finance": 12_250,
"Politics": 16_360,
"Scary": 191_985,
"Support": 12_654,
"Technologies": 32_516,
"Teenage": 66_160,
"Event": 32_769,
"Learned": 67_460,
"Notion": 182_770,
"Wikipedia": 37_583,
"Books": 6_665,
"Extract": 76_050,
"Confessions": 102_701,
"Conspiracy": 75_932,
"Links": 63_674,
"Narcissus": 150_425,
"Relationship": 54_766,
"Relationships": 134_796,
"Reviews": 41_671,
"News": 4_256,
"Translation": 26_820,
"multilingual": 128_406,
}
def lowerCAmelCase__(__snake_case ) -> Any:
'''simple docstring'''
lowerCamelCase__ = set()
lowerCamelCase__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCamelCase__ = char
lowerCamelCase__ = set(__snake_case )
return pairs
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = CONTROL_CODES
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase="<unk>" , **__lowerCAmelCase ):
'''simple docstring'''
super().__init__(unk_token=__lowerCAmelCase , **__lowerCAmelCase )
with open(__lowerCAmelCase , encoding='''utf-8''' ) as vocab_handle:
lowerCamelCase__ = json.load(__lowerCAmelCase )
lowerCamelCase__ = {v: k for k, v in self.encoder.items()}
with open(__lowerCAmelCase , encoding='''utf-8''' ) as merges_handle:
lowerCamelCase__ = merges_handle.read().split('''\n''' )[1:-1]
lowerCamelCase__ = [tuple(merge.split() ) for merge in merges]
lowerCamelCase__ = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
lowerCamelCase__ = {}
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.encoder )
def __lowerCamelCase ( self ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowerCamelCase__ = tuple(__lowerCAmelCase )
lowerCamelCase__ = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
lowerCamelCase__ = get_pairs(__lowerCAmelCase )
if not pairs:
return token
while True:
lowerCamelCase__ = min(__lowerCAmelCase , key=lambda __lowerCAmelCase : self.bpe_ranks.get(__lowerCAmelCase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase__ , lowerCamelCase__ = bigram
lowerCamelCase__ = []
lowerCamelCase__ = 0
while i < len(__lowerCAmelCase ):
try:
lowerCamelCase__ = word.index(__lowerCAmelCase , __lowerCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase__ = j
if word[i] == first and i < len(__lowerCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase__ = tuple(__lowerCAmelCase )
lowerCamelCase__ = new_word
if len(__lowerCAmelCase ) == 1:
break
else:
lowerCamelCase__ = get_pairs(__lowerCAmelCase )
lowerCamelCase__ = '''@@ '''.join(__lowerCAmelCase )
lowerCamelCase__ = word[:-4]
lowerCamelCase__ = word
return word
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = []
lowerCamelCase__ = re.findall(r'''\S+\n?''' , __lowerCAmelCase )
for token in words:
split_tokens.extend(list(self.bpe(__lowerCAmelCase ).split(''' ''' ) ) )
return split_tokens
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
return self.encoder.get(__lowerCAmelCase , self.encoder.get(self.unk_token ) )
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
return self.decoder.get(__lowerCAmelCase , self.unk_token )
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = ''' '''.join(__lowerCAmelCase ).replace('''@@ ''' , '''''' ).strip()
return out_string
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
'''simple docstring'''
if not os.path.isdir(__lowerCAmelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCamelCase__ = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase__ = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCAmelCase , ensure_ascii=__lowerCAmelCase ) + '''\n''' )
lowerCamelCase__ = 0
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
''' Please check that the tokenizer is not corrupted!''' )
lowerCamelCase__ = token_index
writer.write(''' '''.join(__lowerCAmelCase ) + '''\n''' )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
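# Toy, self-contained walk-through of the merge-selection step inside bpe()
# above: among adjacent symbol pairs, pick the pair with the lowest merge rank
# (pairs missing from the table rank as infinity and are never chosen).
_ranks = {("h", "e"): 0, ("l", "l"): 1}
_word = ("h", "e", "l", "l", "o</w>")
_pairs = {(a, b) for a, b in zip(_word, _word[1:])}
print(min(_pairs, key=lambda p: _ranks.get(p, float("inf"))))  # ('h', 'e')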
| 711
|
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
_a = logging.get_logger(__name__)
@dataclass
class __A :
'''simple docstring'''
lowerCAmelCase_ = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(glue_processors.keys() )} )
lowerCAmelCase_ = field(
metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
lowerCAmelCase_ = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
lowerCAmelCase_ = field(
default=lowerCAmelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.task_name.lower()
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = """train"""
lowerCAmelCase_ = """dev"""
lowerCAmelCase_ = """test"""
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = Split.train , __lowerCAmelCase = None , ):
'''simple docstring'''
warnings.warn(
'''This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' , __lowerCAmelCase , )
lowerCamelCase__ = args
lowerCamelCase__ = glue_processors[args.task_name]()
lowerCamelCase__ = glue_output_modes[args.task_name]
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
try:
lowerCamelCase__ = Split[mode]
except KeyError:
raise KeyError('''mode is not a valid split name''' )
# Load data features from cache or dataset file
lowerCamelCase__ = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
lowerCamelCase__ = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCamelCase__ , lowerCamelCase__ = label_list[2], label_list[1]
lowerCamelCase__ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase__ = cached_features_file + '''.lock'''
with FileLock(__lowerCAmelCase ):
if os.path.exists(__lowerCAmelCase ) and not args.overwrite_cache:
lowerCamelCase__ = time.time()
lowerCamelCase__ = torch.load(__lowerCAmelCase )
logger.info(
F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
else:
logger.info(F'Creating features from dataset file at {args.data_dir}' )
if mode == Split.dev:
lowerCamelCase__ = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
lowerCamelCase__ = self.processor.get_test_examples(args.data_dir )
else:
lowerCamelCase__ = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
lowerCamelCase__ = examples[:limit_length]
lowerCamelCase__ = glue_convert_examples_to_features(
__lowerCAmelCase , __lowerCAmelCase , max_length=args.max_seq_length , label_list=__lowerCAmelCase , output_mode=self.output_mode , )
lowerCamelCase__ = time.time()
torch.save(self.features , __lowerCAmelCase )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ):
'''simple docstring'''
return len(self.features )
def __getitem__( self , __lowerCAmelCase ):
'''simple docstring'''
return self.features[i]
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.label_list
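# Generic sketch of the lock-guarded caching pattern used in __init__ above:
# the first process computes and saves, concurrent processes block on the lock
# and then read the cached file (os, torch and FileLock are already imported).
def cached_compute(cache_file, compute_fn):
    with FileLock(cache_file + ".lock"):
        if os.path.exists(cache_file):
            return torch.load(cache_file)
        result = compute_fn()
        torch.save(result, cache_file)
        return result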
| 29
| 0
|
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.getLogger(__name__)
SCREAMING_SNAKE_CASE__ : List[str] = """Hello world! cécé herlolip"""
SCREAMING_SNAKE_CASE__ : Any = namedtuple(
"""BertAbsConfig""",
[
"""temp_dir""",
"""large""",
"""use_bert_emb""",
"""finetune_bert""",
"""encoder""",
"""share_emb""",
"""max_pos""",
"""enc_layers""",
"""enc_hidden_size""",
"""enc_heads""",
"""enc_ff_size""",
"""enc_dropout""",
"""dec_layers""",
"""dec_hidden_size""",
"""dec_heads""",
"""dec_ff_size""",
"""dec_dropout""",
],
)
def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase ) -> Tuple:
'''simple docstring'''
UpperCAmelCase__ : List[Any] = BertAbsConfig(
temp_dir=""".""" , finetune_bert=__lowerCamelCase , large=__lowerCamelCase , share_emb=__lowerCamelCase , use_bert_emb=__lowerCamelCase , encoder="""bert""" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
UpperCAmelCase__ : Any = torch.load(__lowerCamelCase , lambda __lowerCamelCase , __lowerCamelCase : storage )
UpperCAmelCase__ : int = AbsSummarizer(__lowerCamelCase , torch.device("""cpu""" ) , __lowerCamelCase )
original.eval()
UpperCAmelCase__ : Tuple = BertAbsSummarizer(__lowerCamelCase , torch.device("""cpu""" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("""convert the model""" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outputs are identical
# ----------------------------------
logging.info("""Make sure that the models' outputs are identical""" )
UpperCAmelCase__ : Any = BertTokenizer.from_pretrained("""bert-base-uncased""" )
# prepare the model inputs
UpperCAmelCase__ : List[Any] = tokenizer.encode("""This is sample éàalj'-.""" )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(__lowerCamelCase )) )
UpperCAmelCase__ : Any = torch.tensor(__lowerCamelCase ).unsqueeze(0 )
UpperCAmelCase__ : Tuple = tokenizer.encode("""This is sample 3 éàalj'-.""" )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(__lowerCamelCase )) )
UpperCAmelCase__ : Dict = torch.tensor(__lowerCamelCase ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
UpperCAmelCase__ : List[str] = encoder_input_ids
UpperCAmelCase__ : List[str] = decoder_input_ids
UpperCAmelCase__ : List[str] = None
UpperCAmelCase__ : Tuple = None
UpperCAmelCase__ : Optional[Any] = None
UpperCAmelCase__ : Optional[Any] = None
UpperCAmelCase__ : Optional[int] = None
# The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
UpperCAmelCase__ : Any = original(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )[0]
UpperCAmelCase__ : List[str] = original.generator(__lowerCamelCase )
UpperCAmelCase__ : List[Any] = new_model(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )[0]
UpperCAmelCase__ : int = new_model.generator(__lowerCamelCase )
UpperCAmelCase__ : List[Any] = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("""Maximum absolute difference beween weights: {:.2f}""".format(__lowerCamelCase ) )
UpperCAmelCase__ : Any = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("""Maximum absolute difference beween weights: {:.2f}""".format(__lowerCamelCase ) )
UpperCAmelCase__ : str = torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1E-3 )
if are_identical:
logging.info("""all weights are equal up to 1e-3""" )
else:
raise ValueError("""the weights are different. The new model is likely different from the original one.""" )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("""saving the model's state dictionary""" )
torch.save(
new_model.state_dict() , """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
parser.add_argument(
"""--bertabs_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
SCREAMING_SNAKE_CASE__ : int = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
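# Standalone sketch of the equivalence check performed above: report the max
# absolute difference, then accept with torch.allclose under an explicit atol.
_a = torch.ones(3)
_b = torch.ones(3) + 5E-4
print(torch.max(torch.abs(_a - _b)).item())  # ~0.0005, within tolerance
print(torch.allclose(_a, _b, atol=1E-3))     # True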
| 79
|
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Any = {
"""huggingface/informer-tourism-monthly""": (
"""https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class UpperCAmelCase_ ( __lowerCamelCase ):
__lowerCamelCase = 'informer'
__lowerCamelCase = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = "student_t" , _lowerCAmelCase = "nll" , _lowerCAmelCase = 1 , _lowerCAmelCase = None , _lowerCAmelCase = "mean" , _lowerCAmelCase = 0 , _lowerCAmelCase = 0 , _lowerCAmelCase = 0 , _lowerCAmelCase = 0 , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = 64 , _lowerCAmelCase = 32 , _lowerCAmelCase = 32 , _lowerCAmelCase = 2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 2 , _lowerCAmelCase = True , _lowerCAmelCase = "gelu" , _lowerCAmelCase = 0.0_5 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 100 , _lowerCAmelCase = 0.0_2 , _lowerCAmelCase=True , _lowerCAmelCase = "prob" , _lowerCAmelCase = 5 , _lowerCAmelCase = True , **_lowerCAmelCase , ):
# time series specific configuration
UpperCAmelCase__ : List[str] = prediction_length
UpperCAmelCase__ : Optional[Any] = context_length or prediction_length
UpperCAmelCase__ : str = distribution_output
UpperCAmelCase__ : int = loss
UpperCAmelCase__ : Optional[Any] = input_size
UpperCAmelCase__ : Any = num_time_features
UpperCAmelCase__ : int = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
UpperCAmelCase__ : Union[str, Any] = scaling
UpperCAmelCase__ : Optional[Any] = num_dynamic_real_features
UpperCAmelCase__ : List[str] = num_static_real_features
UpperCAmelCase__ : str = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(_lowerCAmelCase ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
UpperCAmelCase__ : List[str] = cardinality
else:
UpperCAmelCase__ : Optional[Any] = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(_lowerCAmelCase ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
UpperCAmelCase__ : str = embedding_dimension
else:
UpperCAmelCase__ : List[str] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
UpperCAmelCase__ : Union[str, Any] = num_parallel_samples
# Transformer architecture configuration
UpperCAmelCase__ : Dict = input_size * len(self.lags_sequence ) + self._number_of_features
UpperCAmelCase__ : Any = d_model
UpperCAmelCase__ : int = encoder_attention_heads
UpperCAmelCase__ : Optional[Any] = decoder_attention_heads
UpperCAmelCase__ : int = encoder_ffn_dim
UpperCAmelCase__ : Tuple = decoder_ffn_dim
UpperCAmelCase__ : List[Any] = encoder_layers
UpperCAmelCase__ : Optional[Any] = decoder_layers
UpperCAmelCase__ : Tuple = dropout
UpperCAmelCase__ : int = attention_dropout
UpperCAmelCase__ : List[str] = activation_dropout
UpperCAmelCase__ : Any = encoder_layerdrop
UpperCAmelCase__ : Union[str, Any] = decoder_layerdrop
UpperCAmelCase__ : Tuple = activation_function
UpperCAmelCase__ : Dict = init_std
UpperCAmelCase__ : str = use_cache
# Informer
UpperCAmelCase__ : Union[str, Any] = attention_type
UpperCAmelCase__ : int = sampling_factor
UpperCAmelCase__ : Any = distil
super().__init__(is_encoder_decoder=_lowerCAmelCase , **_lowerCAmelCase )
@property
def __UpperCAmelCase ( self ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
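# Worked example of the embedding_dimension default above: with cardinalities
# [10, 50] and no explicit embedding_dimension, each categorical feature gets
# min(50, (cat + 1) // 2) dimensions.
_cardinality = [10, 50]
print([min(50, (cat + 1) // 2) for cat in _cardinality])  # [5, 25]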
| 79
| 1
|
from __future__ import annotations
def lowerCamelCase_ ( UpperCamelCase__ : list[float], UpperCamelCase__ : int ):
'''simple docstring'''
print(F"""Vertex\tShortest Distance from vertex {src}""" )
for i, d in enumerate(UpperCamelCase__ ):
print(F"""{i}\t\t{d}""" )
def lowerCamelCase_ ( UpperCamelCase__ : list[dict[str, int]], UpperCamelCase__ : list[float], UpperCamelCase__ : int ):
'''simple docstring'''
for j in range(UpperCamelCase__ ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
return True
return False
def lowerCamelCase_ ( UpperCamelCase__ : list[dict[str, int]], UpperCamelCase__ : int, UpperCamelCase__ : int, UpperCamelCase__ : int ):
'''simple docstring'''
UpperCamelCase__ = [float('''inf''' )] * vertex_count
UpperCamelCase__ = 0.0
for _ in range(vertex_count - 1 ):
for j in range(UpperCamelCase__ ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
UpperCamelCase__ = distance[u] + w
UpperCamelCase__ = check_negative_cycle(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
if negative_cycle_exists:
raise Exception('''Negative cycle found''' )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase = int(input("""Enter number of vertices: """).strip())
lowercase = int(input("""Enter number of edges: """).strip())
lowercase = [{} for _ in range(E)]
for i in range(E):
print("""Edge """, i + 1)
lowercase , lowercase , lowercase = (
int(x)
for x in input("""Enter source, destination, weight: """).strip().split(""" """)
)
lowercase = {"""src""": src, """dst""": dest, """weight""": weight}
lowercase = int(input("""\nEnter shortest path source:""").strip())
lowercase = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
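# Non-interactive sketch of the solver above on a fixed three-vertex graph; the
# edge dicts use the same "src"/"dst"/"weight" keys the relaxation loop reads.
_toy_graph = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 7},
    {"src": 1, "dst": 2, "weight": 2},
]
print(bellman_ford(_toy_graph, 3, 3, 0))  # expected: [0.0, 4.0, 6.0]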
| 591
|
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class __lowercase ( A, A, A, unittest.TestCase ):
'''simple docstring'''
_A : List[Any] = StableDiffusionControlNetImgaImgPipeline
_A : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
_A : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_A : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'''control_image'''} )
_A : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
def A_ ( self : Optional[Any] ):
torch.manual_seed(0 )
UpperCamelCase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0 )
UpperCamelCase__ = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
UpperCamelCase__ = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_a , set_alpha_to_one=_a , )
torch.manual_seed(0 )
UpperCamelCase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCamelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
UpperCamelCase__ = CLIPTextModel(_a )
UpperCamelCase__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCamelCase__ = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def A_ ( self : Optional[Any] , _a : Optional[int] , _a : Optional[Any]=0 ):
if str(_a ).startswith('''mps''' ):
UpperCamelCase__ = torch.manual_seed(_a )
else:
UpperCamelCase__ = torch.Generator(device=_a ).manual_seed(_a )
UpperCamelCase__ = 2
UpperCamelCase__ = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_a , device=torch.device(_a ) , )
UpperCamelCase__ = floats_tensor(control_image.shape , rng=random.Random(_a ) ).to(_a )
UpperCamelCase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCamelCase__ = Image.fromarray(np.uinta(_a ) ).convert('''RGB''' ).resize((64, 64) )
UpperCamelCase__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def A_ ( self : Union[str, Any] ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def A_ ( self : Any ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def A_ ( self : Any ):
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class __lowercase ( A, A, unittest.TestCase ):
'''simple docstring'''
_A : int = StableDiffusionControlNetImgaImgPipeline
_A : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
_A : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_A : Optional[Any] = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def A_ ( self : Tuple ):
torch.manual_seed(0 )
UpperCamelCase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0 )
def init_weights(_a : List[str] ):
if isinstance(_a , torch.nn.Convad ):
torch.nn.init.normal(m.weight )
m.bias.data.fill_(1.0 )
UpperCamelCase__ = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlnet1.controlnet_down_blocks.apply(_a )
torch.manual_seed(0 )
UpperCamelCase__ = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlnet2.controlnet_down_blocks.apply(_a )
torch.manual_seed(0 )
UpperCamelCase__ = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_a , set_alpha_to_one=_a , )
torch.manual_seed(0 )
UpperCamelCase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCamelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
UpperCamelCase__ = CLIPTextModel(_a )
UpperCamelCase__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCamelCase__ = MultiControlNetModel([controlnet1, controlnet2] )
UpperCamelCase__ = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def A_ ( self : Tuple , _a : Dict , _a : Optional[int]=0 ):
if str(_a ).startswith('''mps''' ):
UpperCamelCase__ = torch.manual_seed(_a )
else:
UpperCamelCase__ = torch.Generator(device=_a ).manual_seed(_a )
UpperCamelCase__ = 2
UpperCamelCase__ = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_a , device=torch.device(_a ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_a , device=torch.device(_a ) , ),
]
UpperCamelCase__ = floats_tensor(control_image[0].shape , rng=random.Random(_a ) ).to(_a )
UpperCamelCase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCamelCase__ = Image.fromarray(np.uinta(_a ) ).convert('''RGB''' ).resize((64, 64) )
UpperCamelCase__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def A_ ( self : str ):
UpperCamelCase__ = self.get_dummy_components()
UpperCamelCase__ = self.pipeline_class(**_a )
pipe.to(_a )
UpperCamelCase__ = 10.0
UpperCamelCase__ = 4
UpperCamelCase__ = self.get_dummy_inputs(_a )
UpperCamelCase__ = steps
UpperCamelCase__ = scale
UpperCamelCase__ = pipe(**_a )[0]
UpperCamelCase__ = self.get_dummy_inputs(_a )
UpperCamelCase__ = steps
UpperCamelCase__ = scale
UpperCamelCase__ = pipe(**_a , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
UpperCamelCase__ = self.get_dummy_inputs(_a )
UpperCamelCase__ = steps
UpperCamelCase__ = scale
UpperCamelCase__ = pipe(**_a , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
UpperCamelCase__ = self.get_dummy_inputs(_a )
UpperCamelCase__ = steps
UpperCamelCase__ = scale
UpperCamelCase__ = pipe(**_a , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_1 - output_2 ) ) > 1E-3
assert np.sum(np.abs(output_1 - output_3 ) ) > 1E-3
assert np.sum(np.abs(output_1 - output_4 ) ) > 1E-3
def A_ ( self : int ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def A_ ( self : int ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def A_ ( self : int ):
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def A_ ( self : Dict ):
UpperCamelCase__ = self.get_dummy_components()
UpperCamelCase__ = self.pipeline_class(**_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(_a )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self : Optional[int] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : Dict ):
UpperCamelCase__ = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' )
UpperCamelCase__ = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , safety_checker=_a , controlnet=_a )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_a )
UpperCamelCase__ = torch.Generator(device='''cpu''' ).manual_seed(0 )
UpperCamelCase__ = '''evil space-punk bird'''
UpperCamelCase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((512, 512) )
UpperCamelCase__ = load_image(
'''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((512, 512) )
UpperCamelCase__ = pipe(
_a , _a , control_image=_a , generator=_a , output_type='''np''' , num_inference_steps=50 , strength=0.6 , )
UpperCamelCase__ = output.images[0]
assert image.shape == (512, 512, 3)
UpperCamelCase__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' )
assert np.abs(expected_image - image ).max() < 9E-2
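# Standalone sketch of the device-aware seeding used in get_dummy_inputs above:
# MPS does not support device-bound generators, so those tests seed the global
# CPU generator instead of constructing torch.Generator(device="mps").
def make_generator(device, seed=0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # returns the (re)seeded default generator
    return torch.Generator(device=device).manual_seed(seed)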
| 591
| 1
|
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but it will be slower, as stated here: https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_A = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def lowercase (_snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ,_snake_case=None ,_snake_case=None ,_snake_case=None ,_snake_case=None ,) -> Any:
'''simple docstring'''
if attention_mask is None:
__UpperCamelCase = np.where(input_ids != config.pad_token_id ,1 ,0 )
if decoder_attention_mask is None:
__UpperCamelCase = np.where(decoder_input_ids != config.pad_token_id ,1 ,0 )
if head_mask is None:
__UpperCamelCase = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__UpperCamelCase = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__UpperCamelCase = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
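# Quick numpy illustration of the shift_tokens_right semantics tested below:
# the decoder input begins with the decoder-start token and drops the final
# source token (toy version; the pad/start ids here are arbitrary).
def _toy_shift(input_ids, pad_id, start_id):
    shifted = np.full_like(input_ids, pad_id)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = start_id
    return shifted


print(_toy_shift(np.array([[71, 82, 18]]), pad_id=1, start_id=2))  # [[ 2 71 82]]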
class __UpperCAmelCase :
"""simple docstring"""
def __init__( self : Optional[Any] , A_ : List[str] , A_ : Dict=13 , A_ : str=7 , A_ : List[Any]=True , A_ : Optional[int]=False , A_ : Optional[int]=99 , A_ : str=16 , A_ : Optional[Any]=2 , A_ : Optional[int]=4 , A_ : List[Any]=4 , A_ : Optional[Any]="gelu" , A_ : Optional[int]=0.1 , A_ : Optional[int]=0.1 , A_ : List[str]=32 , A_ : str=2 , A_ : Any=1 , A_ : Tuple=0 , A_ : List[str]=0.02 , )-> Tuple:
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = eos_token_id
__UpperCamelCase = pad_token_id
__UpperCamelCase = bos_token_id
__UpperCamelCase = initializer_range
def A ( self : Any )-> Optional[Any]:
__UpperCamelCase = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
__UpperCamelCase = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
__UpperCamelCase = shift_tokens_right(_lowercase , 1 , 2 )
__UpperCamelCase = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_lowercase , )
__UpperCamelCase = prepare_blenderbot_inputs_dict(_lowercase , _lowercase , _lowercase )
return config, inputs_dict
def A ( self : Optional[int] )-> Optional[Any]:
__UpperCamelCase , __UpperCamelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def A ( self : Union[str, Any] , A_ : str , A_ : Any , A_ : Any )-> Optional[Any]:
__UpperCamelCase = 20
__UpperCamelCase = model_class_name(_lowercase )
__UpperCamelCase = model.encode(inputs_dict["input_ids"] )
__UpperCamelCase , __UpperCamelCase = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
__UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , _lowercase , _lowercase )
__UpperCamelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , _lowercase , decoder_attention_mask=_lowercase , past_key_values=_lowercase , decoder_position_ids=_lowercase , )
__UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
__UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , _lowercase , decoder_attention_mask=_lowercase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_lowercase , )
__UpperCamelCase = model.decode(_lowercase , _lowercase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def A ( self : List[str] , A_ : Tuple , A_ : Union[str, Any] , A_ : Dict )-> Optional[int]:
__UpperCamelCase = 20
__UpperCamelCase = model_class_name(_lowercase )
__UpperCamelCase = model.encode(inputs_dict["input_ids"] )
__UpperCamelCase , __UpperCamelCase = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
__UpperCamelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , _lowercase , _lowercase )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , _lowercase , decoder_attention_mask=_lowercase , past_key_values=_lowercase , decoder_position_ids=_lowercase , )
__UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
__UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , _lowercase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_lowercase , decoder_position_ids=_lowercase , )
__UpperCamelCase = model.decode(_lowercase , _lowercase , decoder_attention_mask=_lowercase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    """Standalone checks for the LM head output shapes and input shifting."""

    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])
                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        generate_kwargs = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        tokenizer_decode_kwargs = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")
        generated_ids = model.generate(**model_inputs, **generate_kwargs).sequences
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'
        generated_txt = tokenizer.batch_decode(generated_ids, **tokenizer_decode_kwargs)
        assert generated_txt[0].strip() == tgt_text
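
As a complement to the slow test above, a minimal, hedged sketch of the same generation path follows; it is not part of the original test file, and it assumes the 400M distilled checkpoint can be loaded into Flax (otherwise pass from_pt=True):

import numpy as np
from transformers import BlenderbotTokenizer, FlaxBlenderbotForConditionalGeneration

model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")
tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
inputs = tokenizer(["Sam"], return_tensors="jax")
# generate() drives encode/init_cache/decode internally, the same machinery
# checked step by step in check_use_cache_forward_with_attn_mask above.
output_ids = model.generate(**inputs, num_beams=1, max_length=16).sequences
print(tokenizer.batch_decode(output_ids, skip_special_tokens=True))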
| 505
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}


class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    """Fast MobileBERT tokenizer (backed by HuggingFace's *tokenizers* library), based on WordPiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Re-configure the backend normalizer if the requested options differ from the serialized ones.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs by adding [CLS] and [SEP] tokens around a sequence or sequence pair."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Create token type IDs: 0 for the first sequence (incl. [CLS]/[SEP]), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
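
A short usage sketch for the fast tokenizer defined above, assuming it is exported as MobileBertTokenizerFast and the google/mobilebert-uncased checkpoint referenced in the vocab map is reachable:

from transformers import MobileBertTokenizerFast

tok = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
enc = tok("hello world", "second segment")
# create_token_type_ids_from_sequences marks the first segment 0 and the second 1.
print(enc["input_ids"])
print(enc["token_type_ids"])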
| 690
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    """Fast XLNet tokenizer (backed by HuggingFace's *tokenizers* library), based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """XLNet puts special tokens at the end: X <sep> <cls> or A <sep> B <sep> <cls>."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment id 0 for sequence A, 1 for sequence B, and 2 for the trailing <cls> token."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
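
A brief sketch of the XLNet-specific conventions implemented above (left padding, trailing <sep>/<cls>, segment id 2 for <cls>); the checkpoint name comes from the vocab map at the top of this file:

from transformers import XLNetTokenizerFast

tok = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
enc = tok("first", "second")
print(tok.padding_side)       # "left"
print(enc["token_type_ids"])  # ends with 2, the cls segment id used above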
| 712
|
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    """Map a CompVis-style VAE state dict onto the diffusers AutoencoderKL layout."""
    vae_state_dict = checkpoint
    new_checkpoint = {}
    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]
    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]
    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]
    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }
    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }
    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]
        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]
        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    """Download the v1 inference config, convert the VAE weights, and save a diffusers AutoencoderKL."""
    # Only support V1
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)
    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]
    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)
    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
lowerCamelCase = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
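
A hedged usage sketch; the script filename is illustrative and the paths are placeholders:

# Programmatic use of the converter defined above:
#     vae_pt_to_vae_diffuser("checkpoints/vae.pt", "converted-vae")
# Or from the command line, assuming the file is saved as convert_vae_pt_to_diffusers.py:
#     python convert_vae_pt_to_diffusers.py --vae_pt_path checkpoints/vae.pt --dump_path converted-vae
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("converted-vae")  # reload the converted model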
| 207
| 0
|
'''simple docstring'''
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Convert a TensorFlow T5 checkpoint into a PyTorch model directory."""
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
lowercase__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowercase__ : Tuple = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
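
A hedged invocation sketch (the script name is illustrative and the paths are placeholders):

#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./t5_tf_checkpoint \
#       --config_file ./t5_config.json \
#       --pytorch_dump_path ./t5_pytorch
from transformers import T5ForConditionalGeneration

model = T5ForConditionalGeneration.from_pretrained("./t5_pytorch")  # load the converted weights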
| 98
|
'''simple docstring'''
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return every index in ``s`` where ``pattern`` starts, scanning all alignments."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
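# The scan above checks the pattern at every alignment, so it costs
# O(len(s) * len(pattern)) comparisons in the worst case, and it reports
# overlapping matches:
assert naive_pattern_search("AAAA", "AA") == [0, 1, 2]  # overlapping occurrences are all found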
| 694
| 0
|
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    """Seed python, numpy and torch RNGs for reproducible training."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
class EMAModel:
    """Exponential Moving Average of model parameters."""

    def __init__(
        self,
        parameters,
        decay=0.9999,
        min_decay=0.0,
        update_after_step=0,
        use_ema_warmup=False,
        inv_gamma=1.0,
        power=2 / 3,
        model_cls=None,
        model_config=None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True
        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]
        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]
        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]
        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])
        self.temp_stored_params = None
        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`
        self.model_cls = model_cls
        self.model_config = model_config
    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)
        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)
        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")
        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")
        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)
        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)

    def get_decay(self, optimization_step: int) -> float:
        """Compute the decay factor for the exponential moving average."""
        step = max(0, optimization_step - self.update_after_step - 1)
        if step <= 0:
            return 0.0
        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)
        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value
    @torch.no_grad()
    def step(self, parameters):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()
        parameters = list(parameters)
        self.optimization_step += 1
        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay
        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed
        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)
            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)
    def copy_to(self, parameters) -> None:
        """Copy the averaged parameters into the given collection of parameters."""
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        """Move internal buffers of the ExponentialMovingAverage to `device`."""
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]

    def state_dict(self) -> dict:
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters) -> None:
        """Save the current parameters so they can be restored later."""
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters) -> None:
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)
        # Better memory-wise.
        self.temp_stored_params = None
    def load_state_dict(self, state_dict: dict) -> None:
        state_dict = copy.deepcopy(state_dict)
        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")
        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")
        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")
        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")
        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")
        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")
        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")
        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
| 703
|
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")
_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")
_split_re = r"^\w+(\.\w+)*$"
INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"
def camelcase_to_snakecase(name: str) -> str:
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name: str) -> str:
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name: str) -> str:
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name: str, split: str) -> str:
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None) -> str:
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None) -> list:
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)
    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
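
A quick illustration of the helpers above (values are what the code computes, shown for a made-up dataset name):

print(camelcase_to_snakecase("SquadV2"))  # "squad_v2"
print(filename_prefix_for_split("squad", "train"))  # "squad-train"
print(filenames_for_dataset_split("/data", "squad", "train", filetype_suffix="arrow", shard_lengths=[100, 100]))
# ['/data/squad-train-00000-of-00002.arrow', '/data/squad-train-00001-of-00002.arrow']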
| 543
| 0
|
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_ffmpeg_warned, _librosa_warned, _audioread_warned = False, False, False
@dataclass
class Audio:
    """Audio feature: extracts audio data from a path, raw bytes, or an array + sampling rate."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        """Encode example into a format for Arrow."""
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32_767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32_767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
        """Decode an encoded audio example into {"path", "array", "sampling_rate"}."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")
        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err
        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None
            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)
        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate
        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in the decodable state, raise an error, otherwise flatten the feature into a dictionary."""
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        """Cast an Arrow array to the Audio arrow storage type ({"bytes": binary, "path": string})."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed the referenced audio files as bytes into the Arrow array."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
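
A hedged usage sketch of the feature above with the datasets library (the file path is a placeholder; decoding needs a real audio file on disk):

from datasets import Audio, Dataset

ds = Dataset.from_dict({"audio": ["path/to/file.wav"]}).cast_column("audio", Audio(sampling_rate=16_000))
sample = ds[0]["audio"]  # decode_example runs here and resamples to 16 kHz
print(sample["sampling_rate"], sample["array"].shape)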
| 115
|
"""simple docstring"""
from __future__ import annotations
def resistor_parallel(resistors: list) -> float:
    """Equivalent resistance of resistors in parallel: 1 / (1/r1 + 1/r2 + ... + 1/rn)."""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list) -> float:
    """Equivalent resistance of resistors in series: r1 + r2 + ... + rn."""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
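
A quick check of the two helpers, using the standard formulas (values worked out by hand):

# Two 4-ohm resistors: in series 4 + 4 = 8 ohms, in parallel 1 / (1/4 + 1/4) = 2 ohms.
assert resistor_series([4, 4]) == 8
assert resistor_parallel([4, 4]) == 2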
| 115
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
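
The file above follows the library's lazy-import convention: public names are listed in _import_structure, and submodules are only imported when an attribute is first accessed. A stripped-down, hedged sketch of that idea, independent of the transformers internals:

import importlib
import types


class LazyModule(types.ModuleType):
    """Toy version: resolve attributes to submodules on first access."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value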
| 703
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
    "tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 688
| 0
|
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_50, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_00, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_00, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 76
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
_A = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)
    def __call__(self, image, candidate_labels=None, **kwargs):
        """Detect objects described by `candidate_labels` in `image` (path, URL, or PIL image)."""
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")
        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results
    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params
    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")
        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")
        outputs = self.model(**model_inputs)
        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]
            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])
                result = {"score": score, "label": label, "box": box}
                results.append(result)
        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]
        return results
    def _get_bounding_box(self, box) -> Dict[str, int]:
        """Turn a (xmin, ymin, xmax, ymax) tensor into a dict of int coordinates."""
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
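
A hedged usage sketch for the pipeline above; the OWL-ViT checkpoint is a common choice for this task and is an assumption, not something this file pins down:

from transformers import pipeline

detector = pipeline(task="zero-shot-object-detection", model="google/owlvit-base-patch32")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
)
for pred in predictions:
    print(pred["label"], round(pred["score"], 3), pred["box"])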
| 431
| 0
|
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
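
A minimal usage sketch for the TF classes above (the checkpoint name is an assumption; any mT5 checkpoint with TF weights should work):

from transformers import AutoTokenizer, TFMT5ForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
model = TFMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
inputs = tokenizer("translate English to German: Hello", return_tensors="tf")
# The raw pretrained checkpoint is not fine-tuned, so the decoded text is illustrative only.
outputs = model.generate(**inputs, max_length=20)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))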
| 713
|
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n "
        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]
        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_no_internet(self):
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "
        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
@require_torch
def _UpperCAmelCase ( self : Dict ):
UpperCAmelCase = "\nfrom transformers import pipeline\n "
UpperCAmelCase = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
UpperCAmelCase = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
UpperCAmelCase = self.get_env()
UpperCAmelCase = "1"
UpperCAmelCase = [sys.executable, "-c", "\n".join([load, mock, run] )]
UpperCAmelCase = subprocess.run(__SCREAMING_SNAKE_CASE ,env=__SCREAMING_SNAKE_CASE ,check=__SCREAMING_SNAKE_CASE ,capture_output=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.returncode ,1 ,result.stderr )
self.assertIn(
"You cannot infer task automatically within `pipeline` when using offline mode" ,result.stderr.decode().replace("\n" ,"" ) ,)
@require_torch
def _UpperCAmelCase ( self : Any ):
UpperCAmelCase = "\nfrom transformers import AutoModel\n "
UpperCAmelCase = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "
# baseline - just load from_pretrained with normal network
UpperCAmelCase = [sys.executable, "-c", "\n".join([load, run] )]
# should succeed
UpperCAmelCase = self.get_env()
UpperCAmelCase = subprocess.run(__SCREAMING_SNAKE_CASE ,env=__SCREAMING_SNAKE_CASE ,check=__SCREAMING_SNAKE_CASE ,capture_output=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn("success" ,result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
UpperCAmelCase = "1"
UpperCAmelCase = subprocess.run(__SCREAMING_SNAKE_CASE ,env=__SCREAMING_SNAKE_CASE ,check=__SCREAMING_SNAKE_CASE ,capture_output=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn("success" ,result.stdout.decode() )
| 405
| 0
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
[
'num_inference_steps',
'num_waveforms_per_prompt',
'generator',
'latents',
'output_type',
'return_dict',
'callback',
'callback_steps',
] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=(32, 64),
            class_embed_type="simple_projection",
            projection_class_embeddings_input_dim=32,
            class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=1,
            out_channels=1,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
            projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)

        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8,
            sampling_rate=16_000,
            upsample_initial_channel=16,
            upsample_rates=[2, 2],
            upsample_kernel_sizes=[4, 4],
            resblock_kernel_sizes=[3, 7],
            resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]],
            normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=audioldm_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(text_input_ids)
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p,
                padding="max_length",
                max_length=audioldm_pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_input_ids = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(text_input_ids)
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ : List[Any] = self.get_dummy_components()
UpperCAmelCase__ : int = PNDMScheduler(skip_prk_steps=__lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = AudioLDMPipeline(**__lowerCAmelCase )
UpperCAmelCase__ : Any = audioldm_pipe.to(__lowerCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = self.get_dummy_inputs(__lowerCAmelCase )
UpperCAmelCase__ : Dict = '''egg cracking'''
UpperCAmelCase__ : Optional[int] = audioldm_pipe(**__lowerCAmelCase , negative_prompt=__lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = output.audios[0]
assert audio.ndim == 1
assert len(__lowerCAmelCase ) == 256
UpperCAmelCase__ : Optional[int] = audio[:10]
UpperCAmelCase__ : Dict = np.array(
[-0.0_0_5_1, 0.0_0_5_0, -0.0_0_6_0, 0.0_0_3_4, -0.0_0_2_6, 0.0_0_3_3, -0.0_0_2_7, 0.0_0_3_3, -0.0_0_2_8, 0.0_0_3_2] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ : Tuple = self.get_dummy_components()
UpperCAmelCase__ : str = PNDMScheduler(skip_prk_steps=__lowerCAmelCase )
UpperCAmelCase__ : str = AudioLDMPipeline(**__lowerCAmelCase )
UpperCAmelCase__ : List[str] = audioldm_pipe.to(__lowerCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = '''A hammer hitting a wooden surface'''
# test num_waveforms_per_prompt=1 (default)
UpperCAmelCase__ : Optional[int] = audioldm_pipe(__lowerCAmelCase , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
UpperCAmelCase__ : Dict = 2
UpperCAmelCase__ : Any = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
UpperCAmelCase__ : str = 2
UpperCAmelCase__ : List[str] = audioldm_pipe(__lowerCAmelCase , num_inference_steps=2 , num_waveforms_per_prompt=__lowerCAmelCase ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
UpperCAmelCase__ : List[Any] = 2
UpperCAmelCase__ : List[Any] = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=__lowerCAmelCase ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ : Union[str, Any] = self.get_dummy_components()
UpperCAmelCase__ : List[str] = AudioLDMPipeline(**__lowerCAmelCase )
UpperCAmelCase__ : Tuple = audioldm_pipe.to(__lowerCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
UpperCAmelCase__ : Tuple = audioldm_pipe.vocoder.config.sampling_rate
UpperCAmelCase__ : List[Any] = self.get_dummy_inputs(__lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = audioldm_pipe(audio_length_in_s=0.0_1_6 , **__lowerCAmelCase )
UpperCAmelCase__ : int = output.audios[0]
assert audio.ndim == 1
assert len(__lowerCAmelCase ) / vocoder_sampling_rate == 0.0_1_6
UpperCAmelCase__ : int = audioldm_pipe(audio_length_in_s=0.0_3_2 , **__lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = output.audios[0]
assert audio.ndim == 1
assert len(__lowerCAmelCase ) / vocoder_sampling_rate == 0.0_3_2
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.get_dummy_components()
UpperCAmelCase__ : List[str] = AudioLDMPipeline(**__lowerCAmelCase )
UpperCAmelCase__ : List[str] = audioldm_pipe.to(__lowerCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
UpperCAmelCase__ : Any = ['''hey''']
UpperCAmelCase__ : List[str] = audioldm_pipe(__lowerCAmelCase , num_inference_steps=1 )
UpperCAmelCase__ : Tuple = output.audios.shape
assert audio_shape == (1, 256)
UpperCAmelCase__ : int = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
UpperCAmelCase__ : Tuple = SpeechTaHifiGan(__lowerCAmelCase ).to(__lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = audioldm_pipe(__lowerCAmelCase , num_inference_steps=1 )
UpperCAmelCase__ : Union[str, Any] = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__lowerCAmelCase )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
self._test_inference_batch_single_identical(test_mean_pixel_difference=__lowerCAmelCase )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__lowerCAmelCase )
@slow
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : List[str] , _A : Optional[int] , _A : List[str]="cpu" , _A : Optional[Any]=torch.floataa , _A : Optional[int]=0 ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
UpperCAmelCase__ : int = np.random.RandomState(__lowerCAmelCase ).standard_normal((1, 8, 128, 16) )
UpperCAmelCase__ : Union[str, Any] = torch.from_numpy(__lowerCAmelCase ).to(device=__lowerCAmelCase , dtype=__lowerCAmelCase )
UpperCAmelCase__ : Tuple = {
'''prompt''': '''A hammer hitting a wooden surface''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 2.5,
}
return inputs
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' )
UpperCAmelCase__ : List[Any] = audioldm_pipe.to(__lowerCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = self.get_inputs(__lowerCAmelCase )
UpperCAmelCase__ : Tuple = 25
UpperCAmelCase__ : str = audioldm_pipe(**__lowerCAmelCase ).audios[0]
assert audio.ndim == 1
assert len(__lowerCAmelCase ) == 81_920
UpperCAmelCase__ : Optional[Any] = audio[77_230:77_240]
UpperCAmelCase__ : Optional[int] = np.array(
[-0.4_8_8_4, -0.4_6_0_7, 0.0_0_2_3, 0.5_0_0_7, 0.5_8_9_6, 0.5_1_5_1, 0.3_8_1_3, -0.0_2_0_8, -0.3_6_8_7, -0.4_3_1_5] )
UpperCAmelCase__ : List[Any] = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1e-2
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' )
UpperCAmelCase__ : Optional[Any] = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
UpperCAmelCase__ : Tuple = audioldm_pipe.to(__lowerCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = self.get_inputs(__lowerCAmelCase )
UpperCAmelCase__ : Dict = audioldm_pipe(**__lowerCAmelCase ).audios[0]
assert audio.ndim == 1
assert len(__lowerCAmelCase ) == 81_920
UpperCAmelCase__ : Tuple = audio[27_780:27_790]
UpperCAmelCase__ : str = np.array([-0.2_1_3_1, -0.0_8_7_3, -0.0_1_2_4, -0.0_1_8_9, 0.0_5_6_9, 0.1_3_7_3, 0.1_8_8_3, 0.2_8_8_6, 0.3_2_9_7, 0.2_2_1_2] )
UpperCAmelCase__ : Dict = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3e-2
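# Hedged usage sketch (not part of the test suite): running the same pretrained
# checkpoint the slow tests above exercise. Assumes a CUDA device and network
# access to download `cvssp/audioldm`.
def run_audioldm_example():
    pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm").to("cuda")
    audio = pipe("A hammer hitting a wooden surface", num_inference_steps=10).audios[0]
    return audio  # 1-D numpy waveform at pipe.vocoder.config.sampling_rate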
| 75
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : Optional[int] = logging.get_logger(__name__)
A : Dict = {
'''vinvino02/glpn-kitti''': '''https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json''',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 1_60, 2_56],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
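# Hedged usage sketch: the class above is a plain hyperparameter container, so a
# randomly initialised model can be built straight from it. GLPNModel is assumed
# to be the companion model class exported by the same library.
def build_random_glpn():
    from transformers import GLPNModel  # assumed companion class

    config = GLPNConfig(num_encoder_blocks=4, hidden_sizes=[32, 64, 1_60, 2_56])
    return GLPNModel(config)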
| 176
| 0
|
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowerCAmelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowerCAmelCase__ = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : List[str] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , 'schedulers/'))
UpperCamelCase__ : int = self.diffusers_dir
shutil.copy(
os.path.join(UpperCAmelCase_ , 'src/diffusers/schedulers/scheduling_ddpm.py') , os.path.join(self.diffusers_dir , 'schedulers/scheduling_ddpm.py') , )
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : List[str] = 'src/diffusers'
shutil.rmtree(self.diffusers_dir)
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int=None):
UpperCamelCase__ : Optional[int] = comment + F'\nclass {class_name}(nn.Module):\n' + class_code
if overwrite_result is not None:
UpperCamelCase__ : List[str] = comment + F'\nclass {class_name}(nn.Module):\n' + overwrite_result
UpperCamelCase__ : Any = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119)
UpperCamelCase__ : Tuple = black.format_str(UpperCAmelCase_ , mode=UpperCAmelCase_)
UpperCamelCase__ : List[str] = os.path.join(self.diffusers_dir , 'new_code.py')
with open(UpperCAmelCase_ , 'w' , newline='\n') as f:
f.write(UpperCAmelCase_)
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(UpperCAmelCase_)) == 0)
else:
check_copies.is_copy_consistent(f.name , overwrite=UpperCAmelCase_)
with open(UpperCAmelCase_ , 'r') as f:
self.assertTrue(f.read() , UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : List[Any] = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput')
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : List[Any]):
# Base copy consistency
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE + '\n' , )
# With no empty line at the end
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , UpperCAmelCase_ , )
# Copy consistency with rename
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , re.sub('DDPM' , 'Test' , UpperCAmelCase_) , )
# Copy consistency with a really long name
UpperCamelCase__ : List[str] = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
self.check_copy_consistency(
F'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}' , F'{long_class_name}SchedulerOutput' , re.sub('Bert' , UpperCAmelCase_ , UpperCAmelCase_) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , UpperCAmelCase_ , overwrite_result=re.sub('DDPM' , 'Test' , UpperCAmelCase_) , )
| 6
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __lowercase (unittest.TestCase ):
def __UpperCamelCase ( self : List[Any]):
UpperCamelCase__ : int = tempfile.mkdtemp()
# fmt: off
UpperCamelCase__ : Union[str, Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
UpperCamelCase__ : Dict = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
UpperCamelCase__ : Optional[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
UpperCamelCase__ : Union[str, Any] = {'unk_token': '<unk>'}
UpperCamelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
UpperCamelCase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(UpperCAmelCase_) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(UpperCAmelCase_))
UpperCamelCase__ : Dict = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
UpperCamelCase__ : Any = os.path.join(self.tmpdirname , UpperCAmelCase_)
with open(self.image_processor_file , 'w' , encoding='utf-8') as fp:
json.dump(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : Dict , **UpperCAmelCase_ : Union[str, Any]):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[int] , **UpperCAmelCase_ : str):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : Optional[Any] , **UpperCAmelCase_ : Union[str, Any]):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def __UpperCamelCase ( self : str):
shutil.rmtree(self.tmpdirname)
def __UpperCamelCase ( self : Tuple):
UpperCamelCase__ : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
UpperCamelCase__ : List[str] = [Image.fromarray(np.moveaxis(UpperCAmelCase_ , 0 , -1)) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Union[str, Any] = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = self.get_rust_tokenizer()
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
processor_slow.save_pretrained(self.tmpdirname)
UpperCamelCase__ : Any = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase_)
UpperCamelCase__ : str = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
processor_fast.save_pretrained(self.tmpdirname)
UpperCamelCase__ : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase_)
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase_)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase_)
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase_)
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__ : Union[str, Any] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
UpperCamelCase__ : List[str] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
UpperCamelCase__ : Tuple = self.get_image_processor(do_normalize=UpperCAmelCase_ , padding_value=1.0)
UpperCamelCase__ : Dict = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase_ , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , UpperCAmelCase_)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , UpperCAmelCase_)
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Optional[Any] = self.get_image_processor()
UpperCamelCase__ : int = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : int = self.prepare_image_inputs()
UpperCamelCase__ : int = image_processor(UpperCAmelCase_ , return_tensors='np')
UpperCamelCase__ : Optional[int] = processor(images=UpperCAmelCase_ , return_tensors='np')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2)
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Optional[Any] = self.get_image_processor()
UpperCamelCase__ : Dict = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Any = 'lower newer'
UpperCamelCase__ : Union[str, Any] = processor(text=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = tokenizer(UpperCAmelCase_)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Optional[int] = self.get_image_processor()
UpperCamelCase__ : List[str] = self.get_tokenizer()
UpperCamelCase__ : List[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = 'lower newer'
UpperCamelCase__ : List[Any] = self.prepare_image_inputs()
UpperCamelCase__ : str = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_)
self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask', 'pixel_values'])
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_):
processor()
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : Dict = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase__ : List[Any] = processor.batch_decode(UpperCAmelCase_)
UpperCamelCase__ : Optional[int] = tokenizer.batch_decode(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
def __UpperCamelCase ( self : str):
UpperCamelCase__ : Union[str, Any] = self.get_image_processor()
UpperCamelCase__ : List[str] = self.get_tokenizer()
UpperCamelCase__ : Optional[Any] = CLIPProcessor(tokenizer=UpperCAmelCase_ , image_processor=UpperCAmelCase_)
UpperCamelCase__ : List[Any] = 'lower newer'
UpperCamelCase__ : Optional[int] = self.prepare_image_inputs()
UpperCamelCase__ : List[str] = processor(text=UpperCAmelCase_ , images=UpperCAmelCase_)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
| 6
| 1
|
"""simple docstring"""
def validate_initial_digits(credit_card_number: str) -> bool:
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
| 498
|
"""simple docstring"""
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
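# Hedged usage sketch: selecting a backend explicitly instead of relying on
# default_hp_search_backend(); ensure_available() raises with the pip hint when
# the optional dependency is missing.
def pick_backend(name: str = "optuna"):
    backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(name)]()
    backend.ensure_available()  # RuntimeError with install instructions if absent
    return backend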
| 498
| 1
|
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
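# Hedged usage sketch of the helpers above: round-tripping one image batch
# through a VQGAN. The checkpoint paths are the defaults baked into load_vqgan
# and may not exist in your checkout.
def demo_reconstruction(x, device="cpu"):
    model = load_vqgan(device)  # uses ./model_checkpoints/vqgan_only.{yaml,pt}
    return reconstruct_with_vqgan(x, model)  # decoded reconstruction of x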
| 713
|
'''simple docstring'''
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes up to and including num."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
if __name__ == "__main__":
print(prime_sieve(int(input("Enter a positive integer: ").strip())))
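# Quick sanity check of the sieve above: primes up to 30.
assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]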
| 555
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_table_transformer": [
        "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TableTransformerConfig",
        "TableTransformerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_table_transformer"] = [
        "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TableTransformerForObjectDetection",
        "TableTransformerModel",
        "TableTransformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
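# A minimal sketch of the lazy-import pattern used above (illustrative, not the
# real _LazyModule): the module object replaces itself in sys.modules, and
# attribute access triggers the actual submodule import on first use.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute back to the submodule that defines it
        self._name_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, name):
        module = importlib.import_module("." + self._name_to_module[name], self.__name__)
        return getattr(module, name)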
| 694
|
'''simple docstring'''
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
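# Sanity check of the helpers above: F(12) = 144 is the first Fibonacci number
# with three digits, so fibonacci_digits_index(3) should return 12.
assert fibonacci(12) == 144
assert fibonacci_digits_index(3) == 12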
| 694
| 1
|
import math
def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(a: float, max_iter: int = 99_99, tolerance: float = 0.00000000000001) -> float:
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
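# Quick check of the solver above: Newton's iteration on f(x) = x^2 - a
# converges to sqrt(a) well within the default tolerance.
assert abs(square_root_iterative(4.0) - 2.0) < 1e-9
assert abs(square_root_iterative(3.2) - 3.2**0.5) < 1e-9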
| 534
|
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_sde_ve_pipeline(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)

        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
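# Hedged usage sketch mirroring the integration test above: unconditional
# sampling with the pretrained SDE-VE checkpoint (downloads the model; the
# 256x256 UNet realistically wants a GPU).
def sample_sde_ve(num_inference_steps=10):
    unet = UNet2DModel.from_pretrained("google/ncsnpp-church-256")
    scheduler = ScoreSdeVeScheduler.from_pretrained("google/ncsnpp-church-256")
    pipe = ScoreSdeVePipeline(unet=unet, scheduler=scheduler).to(torch_device)
    return pipe(num_inference_steps=num_inference_steps, output_type="numpy").images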
| 534
| 1
|
import datasets
from .evaluate import evaluate
_CITATION = '''\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
'''
_DESCRIPTION = '''
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
'''
_KWARGS_DESCRIPTION = '''
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the CUAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
\'aupr\': Area Under the Precision-Recall curve
\'prec_at_80_recall\': Precision at 80% recall
\'prec_at_90_recall\': Precision at 90% recall
Examples:
>>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> cuad_metric = datasets.load_metric("cuad")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"predictions": {
"id": datasets.Value("string" ),
"prediction_text": datasets.features.Sequence(datasets.Value("string" ) ),
},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) ,codebase_urls=["https://www.atticusprojectai.org/cuad"] ,reference_urls=["https://www.atticusprojectai.org/cuad"] ,)
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 659
|
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value


def generate_roman_numerals(num: int) -> str:
    numerals = ""

    m_count = num // 10_00
    numerals += m_count * "M"
    num %= 10_00

    c_count = num // 1_00
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 1_00

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)

    return savings
if __name__ == "__main__":
print(f"{solution() = }")
| 659
| 1
|
"""simple docstring"""
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
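# Usage sketch of the function above: a height-3 game tree over 8 leaf scores
# with the maximiser moving first; the optimal value works out to 65.
example_scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
assert minimax(0, 0, True, example_scores, 3) == 65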
| 2
|
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Optional[int] = 'https://openaipublic.azureedge.net/jukebox/models/'
__SCREAMING_SNAKE_CASE : List[Any] = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")

    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")

    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
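# Illustrative examples of the mapping above (comments added, not in the original file):
#     replace_key("bottleneck.level_blocks.0.k")  ->  "bottleneck.level_blocks.0.codebook"
#     replace_key("prior.x_out.weight")           ->  "prior.fc_proj_out.weight"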
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")

        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
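# Worked example of one rename (comment added, not in the original file): the raw key
# "encoders.0.level_blocks.0.model.1.3.weight" matches re_encoder_block_conv_in with
# groups ("0", "0", "1", "3", "weight"), so block_index = 1 * 2 + 3 = 5 and the key
# becomes "encoders.0.level_blocks.0.downsample_block.5.weight" before replace_key runs.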
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """Return a mapping from utf-8 bytes to unicode strings, avoiding whitespace/control characters."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
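# Illustrative values (comment added, not in the original file): printable ASCII maps to
# itself, e.g. bytes_to_unicode()[ord("A")] == "A", while the space byte is remapped,
# bytes_to_unicode()[ord(" ")] == "Ġ" (chr(256 + 32)).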
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, given as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
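# Illustrative check (comment added, not in the original file):
#     sorted(get_pairs(("l", "o", "w"))) == [("l", "o"), ("o", "w")]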
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
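    # Illustrative sketch (hypothetical merge table, not from a real merges file): with
    # self.bpe_ranks = {("l", "o"): 0, ("lo", "w"): 1}, bpe("low") first merges the
    # lowest-ranked pair ("l", "o") into "lo", then ("lo", "w") into "low", caching and
    # returning "low".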
    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
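# Illustrative padding sketch (hypothetical values, not in the original file): with
# padding_side == "right" and an input padded from length 6 to 8, a global_attention_mask
# of [1, 0, 0, 0, 0, 0] becomes [1, 0, 0, 0, 0, 0, -1, -1]; here -1 marks padding,
# 0 local attention, and 1 global attention.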
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i) -> int:
        return i


class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}


class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids


class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
F'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'''
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
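# Illustrative check (not in the original file): 2 moles at 100 K in a 5 m^3 vessel gives
#     pressure_of_gas_system(2, 100, 5) == 2 * 100 * 8.314462 / 5 == 332.57848  # pascals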
if __name__ == "__main__":
from doctest import testmod
testmod()
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution() -> int:
    """Count how many words in words.txt have a triangular word value."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
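# Illustrative check (not in the original file): "SKY" has word value
# 19 + 11 + 25 = 55 = 10 * 11 / 2, the 10th triangular number, so it counts.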
if __name__ == "__main__":
print(solution())
def binary_recursive(decimal: int) -> str:
    """Take a non-negative integer and return its binary digits as a string."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Take an integer (as a string) and return its binary equivalent with a 0b prefix."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
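# Illustrative checks (not in the original file):
#     binary_recursive(5)  ->  "101"
#     main("-20")          ->  "-0b10100"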
if __name__ == "__main__":
from doctest import testmod
testmod()
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    @slow
    def test_tokenizer_integration(self):
# fmt: off
snake_case_ = {"""input_ids""": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        expected_encoding = snake_case_
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-pegasus-large-arxiv",
            revision="ba85d0851d708441f91440d509690f1ab6353415",
        )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        test_string = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )

        token_ids = self._large_tokenizer(test_string).input_ids

        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
import unittest
import numpy as np
def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray = None,
) -> np.ndarray:
    """Compute the Schur complement of the symmetric block matrix [[A, B], [B^T, C]]."""
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )

    return mat_c - mat_b.T @ a_inv @ mat_b
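# Illustrative identity (not in the original file): for the block matrix
# M = [[A, B], [B^T, C]] and Schur complement S = C - B^T A^{-1} B, one has
# det(M) = det(A) * det(S), which is exactly what the test below verifies numerically.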
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            schur_complement(b, a, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]
    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )

        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T
    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        """Compute the log-mel spectrogram of the provided audio."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], List):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
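# Illustrative usage sketch (hypothetical values, not in the original file):
#     extractor = TvltFeatureExtractor()
#     inputs = extractor(np.zeros(44100, dtype=np.float32), sampling_rate=44100, return_tensors="np")
#     # inputs["audio_values"] has shape (1, 1, time, 128); inputs["audio_mask"] flags real patches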
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image to a given level."""

    def brightness(c: int) -> float:
        """Fundamental transformation: reduces to c + level (the 128s cancel out)."""
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)
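# Illustrative check (not in the original file): with level = 100, a pixel value of 50 maps
# to 128 + 100 + (50 - 128) = 150; PIL's Image.point clamps results to the valid 0-255
# range for 8-bit images.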
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
        brigt_img = change_brightness(img, 100)
brigt_img.save('''image_data/lena_brightness.png''', format='''png''')
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """Calculate real power from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Calculate reactive power from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
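# Illustrative check (not in the original file): at 100 VA apparent power and a power
# factor of 0.8, real_power(100, 0.8) == 80.0 W and reactive_power(100, 0.8) ~= 60.0 VAR,
# since sqrt(1 - 0.8**2) == sqrt(0.36) ~= 0.6.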
if __name__ == "__main__":
import doctest
doctest.testmod()
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b
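# Illustrative sketch (not in the original file): RegressionModel(a=2, b=3) simply computes
# y = a * x + b, so model(torch.tensor([1.0])) returns tensor([5.0]) (and prints the dtype
# banner on the first call).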
def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'kwargs, expected' , [
({'num_shards': 0, 'max_num_jobs': 1}, []),
({'num_shards': 10, 'max_num_jobs': 1}, [range(10 )]),
        ({'num_shards': 10, 'max_num_jobs': 10}, [range(i, i + 1) for i in range(10)]),
({'num_shards': 1, 'max_num_jobs': 10}, [range(1 )]),
({'num_shards': 10, 'max_num_jobs': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({'num_shards': 3, 'max_num_jobs': 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
'gen_kwargs, max_num_jobs, expected' , [
({'foo': 0}, 10, [{'foo': 0}]),
({'shards': [0, 1, 2, 3]}, 1, [{'shards': [0, 1, 2, 3]}]),
({'shards': [0, 1, 2, 3]}, 4, [{'shards': [0]}, {'shards': [1]}, {'shards': [2]}, {'shards': [3]}]),
({'shards': [0, 1]}, 4, [{'shards': [0]}, {'shards': [1]}]),
({'shards': [0, 1, 2, 3]}, 2, [{'shards': [0, 1]}, {'shards': [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
'gen_kwargs, expected' , [
({'foo': 0}, 1),
({'shards': [0]}, 1),
({'shards': [0, 1, 2, 3]}, 4),
({'shards': [0, 1, 2, 3], 'foo': 0}, 4),
({'shards': [0, 1, 2, 3], 'other': (0, 1)}, 4),
({'shards': [0, 1, 2, 3], 'shards2': [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
def __init__( self : Optional[int] , UpperCamelCase__ : PretrainedConfig , UpperCamelCase__ : str = "default" , UpperCamelCase__ : List[PatchingSpec] = None , UpperCamelCase__ : bool = False , ):
"""simple docstring"""
super().__init__(UpperCamelCase__ , task=UpperCamelCase__ , patching_specs=UpperCamelCase__ , use_past=UpperCamelCase__ )
if not getattr(self._config , 'pad_token_id' , UpperCamelCase__ ):
# TODO: how to do that better?
UpperCamelCase = 0
@property
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase__ , direction='inputs' )
UpperCamelCase = {0: 'batch', 1: 'past_sequence + sequence'}
else:
UpperCamelCase = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def A ( self : List[str] ):
"""simple docstring"""
return self._config.n_layer
@property
def A ( self : str ):
"""simple docstring"""
return self._config.n_head
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
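A minimal usage sketch (added here, not part of the module; it assumes an installed `transformers` that exposes this config under its public name):

from transformers import GPTJConfig

config = GPTJConfig(n_layer=2, n_head=4, n_embd=128, n_positions=512)
assert config.num_hidden_layers == 2  # resolved through attribute_map to n_layer
assert config.hidden_size == 128      # likewise mapped to n_embd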
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
UpperCAmelCase__ = logging.get_logger(__name__)
class a ( _UpperCAmelCase ):
def __init__( self : List[str] , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : Any ):
warnings.warn(
"""The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use YolosImageProcessor instead.""" , lowercase__ , )
super().__init__(*lowercase__ , **lowercase__ )
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class a ( lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case : Union[str, Any] = 'nat'
_snake_case : List[str] = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
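A usage sketch (added, not in the original file) showing the derived channel dimension, which doubles per stage:

from transformers import NatConfig

config = NatConfig(embed_dim=64, depths=[3, 4, 6, 5])
assert config.hidden_size == 64 * 2 ** 3  # channel dim after the last stage
assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]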
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
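An invocation sketch (the script filename and token are placeholders, not values from this file); `--target_runners` is split on commas exactly as `list_str` does:

# python check_offline_runners.py --target_runners runner-a,runner-b --token <GITHUB_TOKEN>
assert "runner-a,runner-b".split(",") == ["runner-a", "runner-b"]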
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig


logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}


class DPTConfig(PretrainedConfig):
    model_type = "dpt"
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                self.backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )

            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
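A brief sketch of the two construction paths (added; assumes `transformers` exports `DPTConfig`):

from transformers import DPTConfig

plain = DPTConfig()                 # pure ViT backbone, no BiT config
assert plain.backbone_config is None

hybrid = DPTConfig(is_hybrid=True)  # auto-initializes a BiT backbone config
assert hybrid.backbone_config is not None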
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    # Brute force: check every 3-element permutation.
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    # Two-pointer approach on a sorted array.
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    """Wraps a TVLT image processor and a TVLT feature extractor into a single processor."""

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
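A usage sketch (added; the checkpoint name is an assumption about a hub repo that ships both sub-processors):

import numpy as np
from transformers import TvltProcessor

processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")  # assumed checkpoint
audio = np.random.randn(44100).astype(np.float32)
inputs = processor(audio=audio, sampling_rate=44100, return_tensors="pt")
print(sorted(inputs.keys()))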
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
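Thanks to the lazy module, the public names import as usual; a quick sketch (added; assumes `transformers` and `torch` are installed):

from transformers import ASTConfig, ASTModel

config = ASTConfig(num_hidden_layers=2, num_attention_heads=2, hidden_size=32, intermediate_size=64)
model = ASTModel(config)
print(model.config.model_type)  # audio-spectrogram-transformer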
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
    config = Swin2SRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""

    return config
def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # split the fused qkv projection into separate query/key/value tensors
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim

            if "weight" in key:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict
def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)
    model = Swin2SRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")

    # verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]]
        )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]]
        )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]]
        )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]]
        )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]]
        )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print("Looks ok!")

    url_to_name = {
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
            "swin2SR-classical-sr-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
            "swin2SR-classical-sr-x4-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
            "swin2SR-compressed-sr-x4-48"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
            "swin2SR-lightweight-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
            "swin2SR-realworld-sr-x4-64-bsrgan-psnr"
        ),
    }
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}")
        processor.push_to_hub(f"caidas/{model_name}")
if __name__ == "__main__":
lowerCamelCase_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
lowerCamelCase_ : Union[str, Any] = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
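An equivalent direct call, added as a sketch (the dump folder path is a placeholder; the call downloads the checkpoint weights):

convert_swin2sr_checkpoint(
    checkpoint_url="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
    pytorch_dump_folder_path="./swin2SR-classical-sr-x2-64",  # placeholder output dir
    push_to_hub=False,
)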
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")
class DoubleLinkedListNode(Generic[T, U]):
    """Double linked list node built specifically for the LRU cache."""

    def __init__(self, key: T | None, val: U | None) -> None:
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    """Double linked list with sentinel head and rear nodes."""

    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        # insert node right before the rear sentinel (most recently used position)
        previous = self.rear.prev

        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None

        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        if node.prev is None or node.next is None:
            return None

        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class LRUCache(Generic[T, U]):
    """LRU cache backed by a hash map and a double linked list."""

    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int) -> None:
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next

                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list

                del self.cache[first_node.key]
                self.num_keys -= 1

            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)

                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010

            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
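A decorator usage sketch (added for illustration; `fib` is a hypothetical client function):

@LRUCache.decorator(100)
def fib(num: int) -> int:
    if num in (1, 2):
        return 1
    return fib(num - 1) + fib(num - 2)

print(fib(30))           # 832040, computed with memoized recursion
print(fib.cache_info())  # e.g. CacheInfo(hits=..., misses=..., capacity=100, current size=...)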
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a polar force (magnitude, angle) into its x and y components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    """Check whether the net moment of the forces about the origin vanishes."""
    # summation of moments is zero
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
# Test to check if it works
__magic_name__ = array(
[
polar_force(718.4, 180 - 30),
polar_force(879.54, 45),
polar_force(100, -90),
]
)
__magic_name__ = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
__magic_name__ = array(
[
polar_force(30 * 9.81, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
__magic_name__ = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
__magic_name__ = array([[0, -2_000], [0, -1_200], [0, 15_600], [0, -12_400]])
__magic_name__ = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
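A quick decomposition check (added for illustration): a force of magnitude 10 at 45 degrees splits into roughly equal components.

fx, fy = polar_force(10, 45)
assert round(fx, 2) == round(fy, 2) == 7.07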
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Calculate the Euclidean distance between two points using numpy."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Calculate the Euclidean distance between two points without numpy."""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        """Benchmark both implementations with timeit."""
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )

    benchmark()
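A worked example on the classic 3-4-5 triangle (added for illustration, not in the original file):

assert euclidean_distance([0, 0], [3, 4]) == 5.0
assert euclidean_distance_no_np([0, 0], [3, 4]) == 5.0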
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def _a ( __lowercase="" ) -> str:
"""simple docstring"""
__UpperCamelCase = tempfile.mkdtemp()
return os.path.join(__lowercase , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16_000)
        agent_type = AgentAudio(path)

        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)


@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)

        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)

        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))


class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)

        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to the image processor,
        assuming do_resize is set to True with a scalar size and size_divisor.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1_333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )