"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
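
# Why the indirection above matters: importing the package stays cheap, and the
# heavy torch-backed submodule is only imported when one of its names is first
# accessed. The same effect can be sketched with a PEP 562 module-level
# __getattr__; everything below is an illustrative sketch, not part of the
# transformers file above:
#
#     import importlib
#
#     _LAZY_ATTRS = {"LiltConfig": ".configuration_lilt", "LiltModel": ".modeling_lilt"}
#
#     def __getattr__(name):
#         if name in _LAZY_ATTRS:
#             module = importlib.import_module(_LAZY_ATTRS[name], __package__)
#             return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")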

# ===== XGLM SentencePiece tokenizer (transformers: models/xglm/tokenization_xglm.py) =====

import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2_048,
}
class XGLMTokenizer(PreTrainedTokenizer):
    """XGLM tokenizer, backed by SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |  7    |  8    |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words
    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (pieces) into a single string."""
        return "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
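
# A quick usage sketch for the tokenizer above. The checkpoint name comes from
# PRETRAINED_VOCAB_FILES_MAP; the exact ids depend on the downloaded
# SentencePiece model, so none are asserted here:
#
#     tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
#     ids = tokenizer("Hello world").input_ids  # </s> is prepended, per build_inputs_with_special_tokens
#     print(tokenizer.convert_ids_to_tokens(ids))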

# ===== CamemBERT configuration (transformers: models/camembert/configuration_camembert.py) =====

from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
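
# A minimal sketch of how these two classes combine for ONNX export. The task
# name "sequence-classification" is one of the standard OnnxConfig tasks; the
# config values are illustrative:
#
#     config = CamembertConfig(vocab_size=32_005, num_hidden_layers=6)
#     onnx_config = CamembertOnnxConfig(config, task="sequence-classification")
#     print(onnx_config.inputs)  # OrderedDict with dynamic batch/sequence axes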

# ===== Flax generation tests (transformers: tests/generation, Flax test mixin) =====

import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape with values in [0, vocab_size)."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()
    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
    @is_pt_flax_cross_test
    def test_greedy_generate_pt_fx(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())
    def test_greedy_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)

    def test_sample_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)

# ===== GPT-NeoX Japanese model tests (transformers) =====

import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"

        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]

        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]

        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

# ===== AutoFeatureExtractor tests (transformers) =====

import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()
            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")

    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

# ===== XOR cipher utility (standalone) =====

from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0) -> None:
        """Simple constructor that stores a default key (0 means "no key set")."""
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, list)

        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")

# ===== minify: copy the first n lines of every file in a directory =====

from pathlib import Path

import fire


def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first n lines of each file in src_dir to a same-named file in dest_dir."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
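
# Example invocation via the fire CLI above (paths and line count illustrative):
#
#     python minify.py ./raw_data ./mini_data 100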

# ===== IPNDM scheduler (diffusers: schedulers/scheduling_ipndm.py) =====

import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """A fourth-order Improved Pseudo Linear Multistep (IPNDM) scheduler."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ):
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
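
# A minimal sketch of the set_timesteps/step protocol the scheduler above
# expects; the zero "model output" stands in for a trained UNet call, so the
# result is meaningless and the loop only exercises the API:
#
#     from diffusers import IPNDMScheduler
#     scheduler = IPNDMScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(50)
#     sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_output = torch.zeros_like(sample)  # stand-in for unet(sample, t).sample
#         sample = scheduler.step(model_output, t, sample).prev_sample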

# ===== join: string-joining utility (standalone) =====

def join(separator: str, separated: list) -> str:
    """
    Joins a list of strings using a separator, rejecting non-string items.

    >>> join("#", ["a", "b", "c", "d"])
    'a#b#c#d'
    >>> join(" ", ["You", "are", "amazing!"])
    'You are amazing!'
    """
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
if __name__ == "__main__":
from doctest import testmod
testmod()

# ===== Stalebot script for huggingface/diffusers issues =====

import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main():
    """Close stale issues, or notify/reopen them depending on recent activity."""
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
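
# This script is meant to run unattended (e.g., from a scheduled CI job) with a
# GITHUB_TOKEN that has triage permissions on the repository; nothing else
# needs configuring beyond that environment variable.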

# ===== Miller-Rabin probabilistic primality test (uses binary_exp_mod) =====

import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1_000):
    """Miller-Rabin primality test with `prec` random witness rounds."""
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1

    # n - 1 = d * (2 ** exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))

# ===== Speech2Text processor (transformers: models/speech_to_text/processing_speech_to_text.py) =====

import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
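
# A short usage sketch for the processor above, assuming a checkpoint that
# bundles a matching feature extractor and tokenizer (the name below is one
# such public checkpoint; `waveform` is a 1-D float array of 16 kHz audio):
#
#     processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#     inputs = processor(audio=waveform, sampling_rate=16_000, text="a transcript", return_tensors="pt")
#     # audio features come from the feature extractor; inputs["labels"] from the tokenizer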

# ===== MobileNetV1 model tests (transformers) =====

import inspect
import unittest
from transformers import MobileNetV1Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileNetV1ForImageClassification, MobileNetV1Model
    from transformers.models.mobilenet_v1.modeling_mobilenet_v1 import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetV1ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1_024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV1Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV1Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV1ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class _UpperCAmelCase ( A__ ,A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
lowercase__ = (
{"""feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = MobileNetVaModelTester(self )
lowercase__ = MobileNetVaConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase )
def lowercase__ ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' )
def test_inputs_embeds( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' )
def test_model_common_attributes( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''MobileNetV1 does not output attentions''' )
def test_attention_outputs( self ):
'''simple docstring'''
pass
def test_forward_signature( self ):
'''simple docstring'''
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ['''pixel_values''']
self.assertListEqual(arg_names[:1], expected_arg_names )
def test_model( self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def test_hidden_states_output( self ):
'''simple docstring'''
def check_hidden_states_output(inputs_dict, config, model_class ):
model = model_class(config )
model.to(torch_device )
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class ) )
hidden_states = outputs.hidden_states
expected_num_stages = 26
self.assertEqual(len(hidden_states ), expected_num_stages )
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict['''output_hidden_states'''] = True
check_hidden_states_output(inputs_dict, config, model_class )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class )
def test_for_image_classification( self ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def test_model_from_pretrained( self ):
'''simple docstring'''
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = MobileNetVaModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def prepare_img( ):
'''simple docstring'''
image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def default_image_processor( self ):
'''simple docstring'''
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None
)
@slow
def test_inference_image_classification_head( self ):
'''simple docstring'''
model = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(torch_device )
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image, return_tensors='''pt''' ).to(torch_device )
# forward pass
with torch.no_grad():
outputs = model(**inputs )
# verify the logits
expected_shape = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape, expected_shape )
expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(torch_device )
self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4 ) )
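# --- Added example: a minimal, hedged inference sketch mirroring the integration test above.
# `google/mobilenet_v1_1.0_224` is the real hub checkpoint; the image path is an illustrative
# placeholder, and this sketch is not part of the test suite.
# from transformers import MobileNetV1ImageProcessor, MobileNetV1ForImageClassification
# from PIL import Image
# import torch
#
# processor = MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
# model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
# inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
# with torch.no_grad():
#     logits = model(**inputs).logits
# print(model.config.id2label[logits.argmax(-1).item()])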
| 207
| 0
|
"""simple docstring"""
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path, pytorch_dump_folder_path, classification_head ) -> None:
"""Copy the weights of a fairseq X-MOD checkpoint into a transformers Xmod model."""
data_dir = Path('''data_bin''' )
xmod = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(xmod_checkpoint_path ).parent ), checkpoint_file=Path(xmod_checkpoint_path ).name, _name='''xmod_base''', arch='''xmod_base''', task='''multilingual_masked_lm''', data_name_or_path=str(data_dir ), bpe='''sentencepiece''', sentencepiece_model=str(Path(xmod_checkpoint_path ).parent / '''sentencepiece.bpe.model''' ), src_dict=str(data_dir / '''dict.txt''' ), )
xmod.eval() # disable dropout
print(xmod )
xmod_sent_encoder = xmod.model.encoder.sentence_encoder
config = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings, hidden_size=xmod.cfg.model.encoder_embed_dim, num_hidden_layers=xmod.cfg.model.encoder_layers, num_attention_heads=xmod.cfg.model.encoder_attention_heads, intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim, max_position_embeddings=5_14, type_vocab_size=1, layer_norm_eps=1e-5, pre_norm=xmod.cfg.model.encoder_normalize_before, adapter_reduction_factor=getattr(xmod.cfg.model, '''bottleneck''', 2 ), adapter_layer_norm=xmod.cfg.model.adapter_layer_norm, adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm, ln_before_adapter=xmod.cfg.model.ln_before_adapter, languages=xmod.cfg.model.languages, )
if classification_head:
config.num_labels = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our X-MOD config:''', config )
model = XmodForSequenceClassification(config ) if classification_head else XmodForMaskedLM(config )
model.eval()
# Now let's copy all the weights.
# Embeddings
model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
layer = model.roberta.encoder.layer[i]
xmod_layer = xmod_sent_encoder.layers[i]
# self attention
self_attn = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('''Dimensions of self-attention weights do not match.''' )
self_attn.query.weight = xmod_layer.self_attn.q_proj.weight
self_attn.query.bias = xmod_layer.self_attn.q_proj.bias
self_attn.key.weight = xmod_layer.self_attn.k_proj.weight
self_attn.key.bias = xmod_layer.self_attn.k_proj.bias
self_attn.value.weight = xmod_layer.self_attn.v_proj.weight
self_attn.value.bias = xmod_layer.self_attn.v_proj.bias
# self-attention output
self_output = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('''Dimensions of self-attention output weights do not match.''' )
self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias
# intermediate
intermediate = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
raise AssertionError('''Dimensions of intermediate weights do not match.''' )
intermediate.dense.weight = xmod_layer.fc1.weight
intermediate.dense.bias = xmod_layer.fc1.bias
# output
bert_output = layer.output
if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
raise AssertionError('''Dimensions of feed-forward weights do not match.''' )
bert_output.dense.weight = xmod_layer.fc2.weight
bert_output.dense.bias = xmod_layer.fc2.bias
bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('''Lists of language adapters do not match.''' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
to_adapter = bert_output.adapter_modules[lang_code]
from_adapter = xmod_layer.adapter_modules[lang_code]
to_adapter.dense1.weight = from_adapter.fc1.weight
to_adapter.dense1.bias = from_adapter.fc1.bias
to_adapter.dense2.weight = from_adapter.fc2.weight
to_adapter.dense2.bias = from_adapter.fc2.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias
if classification_head:
model.classifier.dense.weight = xmod.model.classification_heads['''mnli'''].dense.weight
model.classifier.dense.bias = xmod.model.classification_heads['''mnli'''].dense.bias
model.classifier.out_proj.weight = xmod.model.classification_heads['''mnli'''].out_proj.weight
model.classifier.out_proj.bias = xmod.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
input_ids = xmod.encode(SAMPLE_TEXT ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(SAMPLE_LANGUAGE )
our_output = model(input_ids )[0]
if classification_head:
their_output = xmod.model.classification_heads['''mnli'''](xmod.extract_features(input_ids ) )
else:
their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape, their_output.shape )
max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
print(F'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
success = torch.allclose(our_output, their_output, atol=1e-3 )
print('''Do both models output the same tensors?''', '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
Path(pytorch_dump_folder_path ).mkdir(parents=True, exist_ok=True )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
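# --- Added example: a typical invocation of the conversion script above (file paths are
# hypothetical placeholders, not values from the source):
# python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#     --xmod_checkpoint_path ./xmod.base/model.pt \
#     --pytorch_dump_folder_path ./xmod-base-converted
# Add --classification_head if the checkpoint carries a fine-tuned MNLI head.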
| 357
|
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , range_bbox=10_00 , ):
'''simple docstring'''
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
self.range_bbox = range_bbox
def prepare_config_and_inputs( self):
'''simple docstring'''
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
# convert bbox to numpy since TF does not support item assignment
bbox = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
t = bbox[i, j, 3]
bbox[i, j, 3] = bbox[i, j, 1]
bbox[i, j, 1] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
t = bbox[i, j, 2]
bbox[i, j, 2] = bbox[i, j, 0]
bbox[i, j, 0] = t
bbox = tf.convert_to_tensor(bbox)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
choice_labels = ids_tensor([self.batch_size] , self.num_choices)
config = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def create_and_check_model( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
'''simple docstring'''
model = TFLayoutLMModel(config=config)
result = model(input_ids , bbox , attention_mask=input_mask , token_type_ids=token_type_ids)
result = model(input_ids , bbox , token_type_ids=token_type_ids)
result = model(input_ids , bbox)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def create_and_check_for_masked_lm( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
'''simple docstring'''
model = TFLayoutLMForMaskedLM(config=config)
result = model(input_ids , bbox , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_sequence_classification( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
'''simple docstring'''
config.num_labels = self.num_labels
model = TFLayoutLMForSequenceClassification(config=config)
result = model(input_ids , bbox , attention_mask=input_mask , token_type_ids=token_type_ids)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def create_and_check_for_token_classification( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
'''simple docstring'''
config.num_labels = self.num_labels
model = TFLayoutLMForTokenClassification(config=config)
result = model(input_ids , bbox , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_question_answering( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
'''simple docstring'''
model = TFLayoutLMForQuestionAnswering(config=config)
result = model(input_ids , bbox , attention_mask=input_mask , token_type_ids=token_type_ids)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def prepare_config_and_inputs_for_common( self):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
bbox,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_tf
class TFLayoutLMModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowercase__ = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
lowercase__ = (
{
'feature-extraction': TFLayoutLMModel,
'fill-mask': TFLayoutLMForMaskedLM,
'text-classification': TFLayoutLMForSequenceClassification,
'token-classification': TFLayoutLMForTokenClassification,
'zero-shot': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
lowercase__ = False
lowercase__ = True
lowercase__ = 10
def setUp( self):
'''simple docstring'''
self.model_tester = TFLayoutLMModelTester(self)
self.config_tester = ConfigTester(self , config_class=LayoutLMConfig , hidden_size=37)
def test_config( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def test_model( self):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_masked_lm( self):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_sequence_classification( self):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def test_for_token_classification( self):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
def test_for_question_answering( self):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@slow
def test_model_from_pretrained( self):
'''simple docstring'''
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFLayoutLMModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@unittest.skip('''Onnx compliancy broke with TF 2.10''')
def test_onnx_compliancy( self):
'''simple docstring'''
pass
def prepare_layoutlm_batch_inputs() -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = tf.convert_to_tensor([[1_01,10_19,10_14,10_16,10_37,1_28_49,47_47,10_04,1_42_46,22_78,54_39,45_24,50_02,29_30,21_93,29_30,43_41,32_08,10_05,10_55,21_71,28_48,1_13_00,35_31,1_02],[1_01,40_70,40_34,70_20,10_24,30_58,10_15,10_13,28_61,10_13,60_70,1_92_74,27_72,62_05,2_78_14,1_61_47,1_61_47,43_43,20_47,1_02_83,1_09_69,1_43_89,10_12,23_38,1_02]] ) # noqa: E231
_UpperCamelCase = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
_UpperCamelCase = tf.convert_to_tensor([[[0,0,0,0],[4_23,2_37,4_40,2_51],[4_27,2_72,4_41,2_87],[4_19,1_15,4_37,1_29],[9_61,8_85,9_92,9_12],[2_56,38,3_30,58],[2_56,38,3_30,58],[3_36,42,3_53,57],[3_60,39,4_01,56],[3_60,39,4_01,56],[4_11,39,4_71,59],[4_79,41,5_28,59],[5_33,39,6_30,60],[67,1_13,1_34,1_31],[1_41,1_15,2_09,1_32],[68,1_49,1_33,1_66],[1_41,1_49,1_87,1_64],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[2_95,1_48,3_49,1_65],[4_41,1_49,4_92,1_66],[4_97,1_49,5_46,1_64],[64,2_01,1_25,2_18],[10_00,10_00,10_00,10_00]],[[0,0,0,0],[6_62,1_50,7_54,1_66],[6_65,1_99,7_42,2_11],[5_19,2_13,5_54,2_28],[5_19,2_13,5_54,2_28],[1_34,4_33,1_87,4_54],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[3_14,4_69,3_76,4_82],[5_04,6_84,5_82,7_06],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[6_10,7_49,6_52,7_65],[1_30,6_59,1_68,6_72],[1_76,6_57,2_37,6_72],[2_38,6_57,3_12,6_72],[4_43,6_53,6_28,6_72],[4_43,6_53,6_28,6_72],[7_16,3_01,8_25,3_17],[10_00,10_00,10_00,10_00]]] ) # noqa: E231
_UpperCamelCase = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
_UpperCamelCase = tf.convert_to_tensor([[-1_00,10,10,10,9,1,-1_00,7,7,-1_00,7,7,4,2,5,2,8,8,-1_00,-1_00,5,0,3,2,-1_00],[-1_00,12,12,12,-1_00,12,10,-1_00,-1_00,-1_00,-1_00,10,12,9,-1_00,-1_00,-1_00,10,10,10,9,12,-1_00,10,-1_00]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class TFLayoutLMModelIntegrationTest( unittest.TestCase ):
@slow
def test_forward_pass_no_head( self):
'''simple docstring'''
model = TFLayoutLMModel.from_pretrained('''microsoft/layoutlm-base-uncased''')
input_ids , attention_mask , bbox , token_type_ids , labels = prepare_layoutlm_batch_inputs()
# forward pass
outputs = model(input_ids=input_ids , bbox=bbox , attention_mask=attention_mask , token_type_ids=token_type_ids)
# test the sequence output on [0, :3, :3]
expected_slice = tf.convert_to_tensor(
[[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-3))
# test the pooled output on [1, :3]
expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , expected_slice , atol=1e-3))
@slow
def test_forward_pass_sequence_classification( self):
'''simple docstring'''
# initialize model with randomly initialized sequence classification head
model = TFLayoutLMForSequenceClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' , num_labels=2)
input_ids , attention_mask , bbox , token_type_ids , labels = prepare_layoutlm_batch_inputs()
# forward pass
outputs = model(
input_ids=input_ids , bbox=bbox , attention_mask=attention_mask , token_type_ids=token_type_ids , labels=tf.convert_to_tensor([1, 1]) , )
# test whether we get a loss as a scalar
loss = outputs.loss
expected_shape = (2,)
self.assertEqual(loss.shape , expected_shape)
# test the shape of the logits
logits = outputs.logits
expected_shape = (2, 2)
self.assertEqual(logits.shape , expected_shape)
@slow
def test_forward_pass_token_classification( self):
'''simple docstring'''
# initialize model with randomly initialized token classification head
model = TFLayoutLMForTokenClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' , num_labels=13)
input_ids , attention_mask , bbox , token_type_ids , labels = prepare_layoutlm_batch_inputs()
# forward pass
outputs = model(
input_ids=input_ids , bbox=bbox , attention_mask=attention_mask , token_type_ids=token_type_ids , labels=labels)
# test the shape of the logits
logits = outputs.logits
expected_shape = tf.convert_to_tensor((2, 25, 13))
self.assertEqual(logits.shape , expected_shape)
@slow
def test_forward_pass_question_answering( self):
'''simple docstring'''
# initialize model with randomly initialized question answering head
model = TFLayoutLMForQuestionAnswering.from_pretrained('''microsoft/layoutlm-base-uncased''')
input_ids , attention_mask , bbox , token_type_ids , labels = prepare_layoutlm_batch_inputs()
# forward pass
outputs = model(input_ids=input_ids , bbox=bbox , attention_mask=attention_mask , token_type_ids=token_type_ids)
# test the shape of the logits
expected_shape = tf.convert_to_tensor((2, 25))
self.assertEqual(outputs.start_logits.shape , expected_shape)
self.assertEqual(outputs.end_logits.shape , expected_shape)
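# --- Added note: LayoutLM expects each bbox as (x0, y0, x1, y1) normalized to a 0-1000
# grid, as in the fixtures above. A minimal normalization sketch (the page width/height
# arguments are illustrative assumptions, not values from this file):
# def normalize_bbox(bbox, width, height):
#     return [
#         int(1000 * bbox[0] / width),
#         int(1000 * bbox[1] / height),
#         int(1000 * bbox[2] / width),
#         int(1000 * bbox[3] / height),
#     ]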
| 100
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class CamembertConfig( PretrainedConfig ):
model_type ="""camembert"""
def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
'''simple docstring'''
super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig( OnnxConfig ):
@property
def inputs( self )-> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
else:
dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
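# --- Added example: a minimal, hedged usage sketch of the config class above
# (the hyperparameter values shown are just the defaults, for illustration):
# from transformers import CamembertConfig, CamembertModel
# config = CamembertConfig(vocab_size=30522, hidden_size=768, num_hidden_layers=12)
# model = CamembertModel(config)  # randomly initialized, not pretrained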
| 340
|
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
'''n_samples''': 64,
'''horizon''': 32,
'''num_inference_steps''': 20,
'''n_guide_steps''': 2, # can set to 0 for faster sampling, does not use value network
'''scale_grad_by_std''': True,
'''scale''': 0.1,
'''eta''': 0.0,
'''t_grad_cutoff''': 2,
'''device''': '''cpu''',
}
if __name__ == "__main__":
env_name = '''hopper-medium-v2'''
env = gym.make(env_name)
pipeline = ValueGuidedRLPipeline.from_pretrained(
'''bglick13/hopper-medium-v2-value-function-hor32''',
env=env,
)
env.seed(0)
obs = env.reset()
total_reward = 0
total_score = 0
T = 1000
rollout = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
denorm_actions = pipeline(obs, planning_horizon=32)
# execute action in environment
next_observation, reward, terminal, _ = env.step(denorm_actions)
score = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
F" {total_score}"
)
# save observations for rendering
rollout.append(next_observation.copy())
obs = next_observation
except KeyboardInterrupt:
pass
print(F"Total reward: {total_reward}")
| 340
| 1
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__SCREAMING_SNAKE_CASE : Dict = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[int] = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 73
|
"""simple docstring"""
import qiskit
def single_qubit_measure( qubits: int , classical_bits: int ) -> qiskit.result.counts.Counts:
simulator = qiskit.Aer.get_backend('''aer_simulator''' )
# Create a Quantum Circuit acting on the q register
circuit = qiskit.QuantumCircuit(qubits , classical_bits )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
job = qiskit.execute(circuit , simulator , shots=10_00 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(circuit )
if __name__ == "__main__":
print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""")
| 73
| 1
|
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position , value , l_send , r_send , lr_cv , rr_cv , result_pipe ):
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(value )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
temp = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
value = min(value , temp )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(value )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
temp = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
value = max(value , temp )
# after all swaps are performed, send the values back to main
result_pipe[1].send(value )
def odd_even_transposition(arr ):
process_array_ = []
result_pipe = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
temp_rs = Pipe()
temp_rr = Pipe()
process_array_.append(
Process(
target=oe_process , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
temp_ls = temp_rs
temp_lr = temp_rr
for i in range(1 , len(arr ) - 1 ):
temp_rs = Pipe()
temp_rr = Pipe()
process_array_.append(
Process(
target=oe_process , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
temp_ls = temp_rs
temp_lr = temp_rr
process_array_.append(
Process(
target=oe_process , args=(
len(arr ) - 1,
arr[len(arr ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(arr ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(arr ) ):
arr[p] = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def main():
arr = list(range(10 , 0 , -1 ) )
print('Initial List' )
print(*arr )
arr = odd_even_transposition(arr )
print('Sorted List\n' )
print(*arr )
if __name__ == "__main__":
main()
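# --- Added note: the 10 fixed rounds in oe_process match the 10-element demo list above;
# odd-even transposition needs n rounds to guarantee a sorted result for n elements.
# (A generalized version would pass len(arr) into oe_process instead of hard-coding 10.)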
| 86
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)
def shape_list( tensor :Union[tf.Tensor, np.ndarray] ) -> List[int]:
if isinstance(tensor , np.ndarray ):
return list(tensor.shape )
dynamic = tf.shape(tensor )
if tensor.shape == tf.TensorShape(None ):
return dynamic
static = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(static )]
def stable_softmax( logits :tf.Tensor , axis :Optional[int] = None , name :Optional[str] = None ):
return tf.nn.softmax(logits=logits + 1E-9 , axis=axis , name=name )
def functional_layernorm( inputs , weight , bias , epsilon=1E-5 , axis=-1 ):
# This is a very simplified functional layernorm, designed to duplicate
# the functionality of PyTorch nn.functional.layer_norm when this is needed to port
# models in Transformers.
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis , int ):
raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' )
# Get mean and variance on the axis to be normalized
mean , variance = tf.nn.moments(inputs , axes=[axis] , keepdims=True )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
shape = [1] * inputs.shape.rank
shape[axis] = shape_list(inputs )[axis]
weight = tf.reshape(weight , shape )
bias = tf.reshape(bias , shape )
# Compute layer normalization using the batch_normalization
# function.
outputs = tf.nn.batch_normalization(
inputs , mean , variance , offset=bias , scale=weight , variance_epsilon=epsilon , )
return outputs
def flatten( input , start_dim=0 , end_dim=-1 ):
# Replicates the behavior of torch.flatten in TF
# If end_dim or start_dim is negative, count them from the end
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
in_shape = tf.shape(input )
flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(input , out_shape )
def invert_attention_mask( encoder_attention_mask :tf.Tensor ):
if not isinstance(encoder_attention_mask , tf.Tensor ):
encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
encoder_extended_attention_mask = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def check_embeddings_within_bounds( tensor :tf.Tensor , embed_dim :int , tensor_name :str = "input_ids" ):
tf.debugging.assert_less(
tensor , tf.cast(embed_dim , dtype=tensor.dtype ) , message=(
F'''The maximum value of {tensor_name} ({tf.math.reduce_max(tensor )}) must be smaller than the embedding '''
F'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'''
) , )
def save_attributes_to_hdf5_group( group , name , data ):
HDF5_OBJECT_HEADER_LIMIT = 64_512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
bad_attributes = [x for x in data if len(x ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'''The following attributes cannot be saved to HDF5 file because '''
F'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} '''
F'''bytes: {bad_attributes}''' )
data_npy = np.asarray(data )
num_chunks = 1
chunked_data = np.array_split(data_npy , num_chunks )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
chunked_data = np.array_split(data_npy , num_chunks )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(chunked_data ):
group.attrs['''%s%d''' % (name, chunk_id)] = chunk_data
else:
group.attrs[name] = data
def load_attributes_from_hdf5_group( group , name ):
if name in group.attrs:
data = [n.decode('''utf8''' ) if hasattr(n , '''decode''' ) else n for n in group.attrs[name]]
else:
data = []
chunk_id = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('''utf8''' ) if hasattr(n , '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] )
chunk_id += 1
return data
def expand_1d( data ):
def _expand_single_1d_tensor( t ):
if isinstance(t , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(t , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_1d_tensor , data )
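# --- Added example: a small, hedged demonstration of the helpers above (assuming the
# restored names shape_list / flatten / stable_softmax):
# import tensorflow as tf
# t = tf.ones((2, 3, 4))
# print(shape_list(t))                  # [2, 3, 4]
# print(flatten(t, start_dim=1).shape)  # (2, 12), matching torch.flatten(t, 1)
# print(stable_softmax(tf.math.log([[1.0, 1.0]])))  # ~[[0.5, 0.5]]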
| 332
| 0
|
"""simple docstring"""
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution():
script_directory = os.path.dirname(os.path.realpath(__file__ ) )
words_file_path = os.path.join(script_directory , """words.txt""" )
words = """"""
with open(words_file_path ) as f:
words = f.readline()
words = [word.strip("""\"""" ) for word in words.strip("""\r\n""" ).split(""",""" )]
words = [
word
for word in [sum(ord(x ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(words )
if __name__ == "__main__":
print(solution())
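# --- Added worked example: "SKY" -> 19 + 11 + 25 = 55, and 55 is the 10th triangular
# number (0.5 * 10 * 11), so "SKY" counts as a triangle word; solution() tallies how
# many such words appear in words.txt.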
| 336
|
"""simple docstring"""
import pprint
import requests
UpperCAmelCase: Tuple = """https://zenquotes.io/api"""
def quote_of_the_day() -> list:
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def random_quotes() -> list:
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
response = random_quotes()
pprint.pprint(response)
| 336
| 1
|
'''simple docstring'''
from collections import Counter
from timeit import timeit
def lowerCAmelCase (__A = "" , ):
"""simple docstring"""
return sum(c % 2 for c in Counter(input_str.replace(''' ''' , '''''').lower()).values()) < 2
def lowerCAmelCase (__A = ""):
"""simple docstring"""
if len(__A) == 0:
return True
_a = input_str.replace(''' ''' , '''''').lower()
# character_freq_dict: Stores the frequency of every character in the input string
_a = {}
for character in lower_case_input_str:
_a = character_freq_dict.get(__A , 0) + 1
_a = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def lowerCAmelCase (__A = ""):
"""simple docstring"""
print('''\nFor string = ''' , __A , ''':''')
print(
'''> can_string_be_rearranged_as_palindrome_counter()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome_counter(__A) , '''\ttime =''' , timeit(
'''z.can_string_be_rearranged_as_palindrome_counter(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
print(
'''> can_string_be_rearranged_as_palindrome()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome(__A) , '''\ttime =''' , timeit(
'''z.can_string_be_rearranged_as_palindrome(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
if __name__ == "__main__":
check_str = input(
"Enter string to determine if it can be rearranged as a palindrome or not: "
).strip()
benchmark(check_str)
status = can_string_be_rearranged_as_palindrome_counter(check_str)
print(F"""{check_str} can {'' if status else 'not '}be rearranged as a palindrome""")
| 211
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 211
| 1
|
from __future__ import annotations
def comp_and_swap(array: list[int] , index1: int , index2: int , direction: int ) -> None:
'''simple docstring'''
if (direction == 1 and array[index1] > array[index2]) or (
direction == 0 and array[index1] < array[index2]
):
array[index1], array[index2] = array[index2], array[index1]
def bitonic_merge(array: list[int] , low: int , length: int , direction: int ) -> None:
'''simple docstring'''
if length > 1:
middle = int(length / 2 )
for i in range(low , low + middle ):
comp_and_swap(array , i , i + middle , direction )
bitonic_merge(array , low , middle , direction )
bitonic_merge(array , low + middle , middle , direction )
def bitonic_sort(array: list[int] , low: int , length: int , direction: int ) -> None:
'''simple docstring'''
if length > 1:
middle = int(length / 2 )
bitonic_sort(array , low , middle , 1 )
bitonic_sort(array , low + middle , middle , 0 )
bitonic_merge(array , low , length , direction )
if __name__ == "__main__":
user_input = input('''Enter numbers separated by a comma:\n''').strip()
unsorted = [int(item.strip()) for item in user_input.split(''',''')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('''\nSorted array in ascending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('''Sorted array in descending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
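# --- Added note: bitonic sort only works when the number of elements is a power of two,
# since each merge step splits the range exactly in half. For example:
# data = [12, 42, -21, 1]            # len 4 == 2**2
# bitonic_sort(data, 0, len(data), 1)
# print(data)                        # [-21, 1, 12, 42]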
| 358
|
def bin_to_octal(bin_string: str ) -> str:
'''simple docstring'''
if not all(char in "01" for char in bin_string ):
raise ValueError("Non-binary value was passed to the function" )
if not bin_string:
raise ValueError("Empty string was passed to the function" )
oct_string = ""
while len(bin_string ) % 3 != 0:
bin_string = "0" + bin_string
bin_string_in_3_list = [
bin_string[index : index + 3]
for index in range(len(bin_string ) )
if index % 3 == 0
]
for bin_group in bin_string_in_3_list:
oct_val = 0
for index, val in enumerate(bin_group ):
oct_val += int(2 ** (2 - index) * int(val ) )
oct_string += str(oct_val )
return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
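# --- Added worked example: "1111" is left-padded to "001111", split into ["001", "111"],
# and each group maps to one octal digit: 0b001 -> 1, 0b111 -> 7, giving "17"
# (and indeed int("1111", 2) == 0o17 == 15).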
| 191
| 0
|
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlite3
import sqlalchemy
class SqlDatasetReader( AbstractDatasetInputStream ):
def __init__( self , sql : Union[str, "sqlalchemy.sql.Selectable"] , con : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , features : Optional[Features] = None , cache_dir : str = None , keep_in_memory : bool = False , **kwargs , ):
'''simple docstring'''
super().__init__(features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , **kwargs)
self.builder = Sql(
cache_dir=cache_dir , features=features , sql=sql , con=con , **kwargs , )
def read( self):
'''simple docstring'''
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , )
# Build dataset for splits
dataset = self.builder.as_dataset(
split='''train''' , verification_mode=verification_mode , in_memory=self.keep_in_memory)
return dataset
class SqlDatasetWriter :
def __init__( self , dataset : Dataset , name : str , con : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , batch_size : Optional[int] = None , num_proc : Optional[int] = None , **to_sql_kwargs , ):
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(F'num_proc {num_proc} must be an integer > 0.')
self.dataset = dataset
self.name = name
self.con = con
self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
self.num_proc = num_proc
self.to_sql_kwargs = to_sql_kwargs
def write( self):
'''simple docstring'''
_ = self.to_sql_kwargs.pop('''sql''' , None)
_ = self.to_sql_kwargs.pop('''con''' , None)
index = self.to_sql_kwargs.pop('''index''' , False)
written = self._write(index=index , **self.to_sql_kwargs)
return written
def _batch_sql( self , args):
'''simple docstring'''
offset , index , to_sql_kwargs = args
to_sql_kwargs = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs
batch = query_table(
table=self.dataset.data , key=slice(offset , offset + self.batch_size) , indices=self.dataset._indices , )
df = batch.to_pandas()
num_rows = df.to_sql(self.name , self.con , index=index , **to_sql_kwargs)
return num_rows or len(df)
def _write( self , index , **to_sql_kwargs):
'''simple docstring'''
written = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset) , self.batch_size) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
written += self._batch_sql((offset, index, to_sql_kwargs))
else:
num_rows , batch_size = len(self.dataset), self.batch_size
with multiprocessing.Pool(self.num_proc) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , num_rows , batch_size)] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
written += num_rows
return written
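# --- Added example: a minimal, hedged usage sketch of the writer above (the table and
# database names are hypothetical; assumes `datasets` and pandas are installed):
# from datasets import Dataset
# import sqlite3
# ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
# con = sqlite3.connect("example.db")
# SqlDatasetWriter(ds, "my_table", con, batch_size=2).write()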
| 49
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaImgaImgPipelineFastTests ( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
pipeline_class = KandinskyVaaImgaImgPipeline
params = ['''image_embeds''', '''negative_image_embeds''', '''image''']
batch_params = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
required_optional_params = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
__lowercase : Tuple = False
@property
def text_embedder_hidden_size( self):
return 3_2
@property
def time_input_dim( self):
return 3_2
@property
def block_out_channels_a( self):
return self.time_input_dim
@property
def time_embed_dim( self):
return self.time_input_dim * 4
@property
def cross_attention_dim( self):
return 1_0_0
@property
def dummy_unet( self):
torch.manual_seed(0)
__SCREAMING_SNAKE_CASE = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
__SCREAMING_SNAKE_CASE = UNetaDConditionModel(**lowerCAmelCase__)
return model
@property
def dummy_movq_kwargs( self):
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def dummy_movq( self):
torch.manual_seed(0)
__SCREAMING_SNAKE_CASE = VQModel(**self.dummy_movq_kwargs)
return model
def get_dummy_components( self):
unet = self.dummy_unet
movq = self.dummy_movq
ddim_config = {
"""num_train_timesteps""": 1_0_0_0,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_00_85,
"""beta_end""": 0.0_12,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
scheduler = DDIMScheduler(**ddim_config)
components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def get_dummy_inputs( self , device , seed=0):
image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed)).to(device)
negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to(
device)
# create init_image
image = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(seed)).to(device)
image = image.cpu().permute(0 , 2 , 3 , 1)[0]
init_image = Image.fromarray(np.uint8(image)).convert("""RGB""").resize((2_5_6, 2_5_6))
if str(device).startswith("""mps"""):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 6_4,
"""width""": 6_4,
"""num_inference_steps""": 1_0,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def test_kandinsky_img2img( self):
device = """cpu"""
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
output = pipe(**self.get_dummy_inputs(device))
image = output.images
image_from_tuple = pipe(
**self.get_dummy_inputs(device) , return_dict=False , )[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
expected_slice = np.array(
[0.6_19_97_78, 0.63_98_44_06, 0.46_14_57_85, 0.62_94_49_84, 0.5_62_22_15, 0.47_30_61_32, 0.47_44_14_56, 0.4_60_76_06, 0.48_71_92_63])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyVaaImgaImgPipelineIntegrationTests ( unittest.TestCase ):
"""simple docstring"""
def tearDown( self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_kandinsky_img2img( self):
expected_image = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_img2img_frog.npy""")
init_image = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""")
prompt = """A red cartoon frog, 4k"""
pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.float16)
pipe_prior.to(torch_device)
pipeline = KandinskyVaaImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.float16)
pipeline = pipeline.to(torch_device)
pipeline.set_progress_bar_config(disable=None)
generator = torch.Generator(device="""cpu""").manual_seed(0)
image_emb , zero_image_emb = pipe_prior(
prompt , generator=generator , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
output = pipeline(
image=init_image , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type="""np""" , )
image = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(image , expected_image)
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True,
        use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
        intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
        relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type, )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]
        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)
    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin

logger = logging.get_logger(__name__)

enable_full_determinism()
class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)
        return {"sample": noise, "timestep": time_step}
    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)
        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)
        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        image = model(**self.dummy_input).sample
        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample
        assert image is not None, "Make sure output is not None"
    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_model_accelerate_vs_normal_load(self):
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()
        noise = torch.randn(
            1, model_accelerate.config.in_channels, model_accelerate.config.sample_size, model_accelerate.config.sample_size, generator=torch.manual_seed(0), )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)
        arr_accelerate = model_accelerate(noise, time_step)["sample"]
        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()
        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False)
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]
        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)
    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)
        noise = torch.randn(
            1, model.config.in_channels, model.config.sample_size, model.config.sample_size, generator=torch.manual_seed(0), )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)
        with torch.no_grad():
            output = model(noise, time_step).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3
        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)
        return {"sample": noise, "timestep": time_step}
    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)
        assert image is not None, "Make sure output is not None"
    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)
        batch_size = 4
        num_channels = 3
        sizes = (256, 256)
        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)
        with torch.no_grad():
            output = model(noise, time_step).sample
        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)
        with torch.no_grad():
            output = model(noise, time_step).sample
        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
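
# Note (added for clarity): U+E000-U+F8FF is the Basic Multilingual Plane's
# Private Use Area, which is why the special codepoints above can never
# collide with characters assigned by the Unicode Consortium.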
class CanineTokenizer(PreTrainedTokenizer):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, model_max_length=model_max_length, **kwargs, )
        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)
    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CANINE has no vocabulary file to save.
        return ()
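
# A minimal usage sketch (illustrative, not part of the original file): every
# character maps to its Unicode codepoint, with [CLS]/[SEP] drawn from the
# Private Use Area constants defined above.
#
#   tokenizer = CanineTokenizer()
#   ids = [tokenizer._convert_token_to_id(c) for c in "hi"]   # [104, 105]
#   tokenizer.build_inputs_with_special_tokens(ids)           # [57344, 104, 105, 57345]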
def net_present_value(discount_rate: float, cash_flows: list) -> float:
    """Compute the net present value of a series of cash flows."""
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows))
    return round(present_value, ndigits=2)
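
# Worked example (a sanity check, not part of the original file): an initial
# outflow of -1000 followed by two inflows of 600 at a 10% discount rate gives
# NPV = -1000 + 600/1.1 + 600/1.21 ≈ 41.32.
#
#   >>> net_present_value(0.1, [-1000, 600, 600])
#   41.32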
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase__ (lowerCAmelCase_ = 4 ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = abs(lowerCAmelCase_ ) or 4
return [[1 + x + y * row_size for x in range(lowerCAmelCase_ )] for y in range(lowerCAmelCase_ )]
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
return reverse_row(transpose(lowerCAmelCase_ ) )
# OR.. transpose(reverse_column(matrix))
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
return reverse_row(reverse_column(lowerCAmelCase_ ) )
# OR.. reverse_column(reverse_row(matrix))
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
return reverse_column(transpose(lowerCAmelCase_ ) )
# OR.. transpose(reverse_row(matrix))
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [list(lowerCAmelCase_ ) for x in zip(*lowerCAmelCase_ )]
return matrix
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = matrix[::-1]
return matrix
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [x[::-1] for x in matrix]
return matrix
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
for i in matrix:
print(*lowerCAmelCase_ )
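
# Quick worked example (added for illustration): for [[1, 2], [3, 4]],
# transpose gives [[1, 3], [2, 4]] and reversing the rows yields
# [[2, 4], [1, 3]], the 90-degree counterclockwise rotation.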
if __name__ == "__main__":
a__ : int = make_matrix()
print('''\norigin:\n''')
print_matrix(matrix)
print('''\nrotate 90 counterclockwise:\n''')
print_matrix(rotate_aa(matrix))
a__ : Optional[int] = make_matrix()
print('''\norigin:\n''')
print_matrix(matrix)
print('''\nrotate 180:\n''')
print_matrix(rotate_aaa(matrix))
a__ : int = make_matrix()
print('''\norigin:\n''')
print_matrix(matrix)
print('''\nrotate 270 counterclockwise:\n''')
print_matrix(rotate_aaa(matrix))
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
a__ : Optional[int] = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize(
        self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PIL.Image.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
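
# A minimal usage sketch (illustrative, not part of the original file; relies
# on BaseImageProcessor dispatching __call__ to preprocess):
#
#   from PIL import Image
#   processor = MobileViTImageProcessor()
#   batch = processor(images=Image.open("cat.png"), return_tensors="pt")
#   batch["pixel_values"].shape  # torch.Size([1, 3, 256, 256])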
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution() -> int:
    """Count the triangle words in words.txt (Project Euler problem 42)."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    wordfile_path = os.path.join(script_dir, "words.txt")
    words = ""
    with open(wordfile_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
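
# Worked example of the word-value computation (added for illustration):
# "SKY" -> 19 + 11 + 25 = 55, and 55 = 10 * 11 / 2 is the 10th triangular
# number, so "SKY" counts as a triangle word.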
if __name__ == "__main__":
print(solution())
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."


def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`.")
    config_args.add_argument(
        "--config_file", type=str, default=None, help="Path to the config file to use for accelerate.", )
    config_args.add_argument(
        "--tpu_name", default=None, help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.", )
    config_args.add_argument(
        "--tpu_zone", default=None, help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.", )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha", action="store_true", help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.", )
    pod_args.add_argument(
        "--command_file", default=None, help="The path to the file containing the commands to run on the pod on startup.", )
    pod_args.add_argument(
        "--command", action="append", nargs="+", help="A command to run on the pod. Can be passed multiple times.", )
    pod_args.add_argument(
        "--install_accelerate", action="store_true", help="Whether to install accelerate on the pod. Defaults to False.", )
    pod_args.add_argument(
        "--accelerate_version", default="latest", help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.", )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it.")
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"
    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")
    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
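
# Example invocation (illustrative, using only the flags defined above; the
# TPU name and zone are placeholder values):
#
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --install_accelerate --command "python train.py"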
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    """Surface area of a cube: 6 * a^2."""
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    """Surface area of a cuboid: 2 * (lb + bh + lh)."""
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    """Surface area of a sphere: 4 * pi * r^2."""
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    """Total surface area of a hemisphere (curved + flat): 3 * pi * r^2."""
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    """Surface area of a cone: pi * r * (r + slant height)."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    """Surface area of a conical frustum."""
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            "surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    """Surface area of a cylinder: 2 * pi * r * (h + r)."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    """Surface area of a torus: 4 * pi^2 * R * r."""
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError(
            "surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius
def area_rectangle(length: float, width: float) -> float:
    """Area of a rectangle."""
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    """Area of a square."""
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    """Area of a triangle given its base and height."""
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    """Area of a triangle from its three sides, using Heron's formula."""
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3))
    return area


def area_parallelogram(base: float, height: float) -> float:
    """Area of a parallelogram."""
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base1: float, base2: float, height: float) -> float:
    """Area of a trapezium."""
    if base1 < 0 or base2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base1 + base2) * height


def area_circle(radius: float) -> float:
    """Area of a circle."""
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    """Area of an ellipse."""
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    """Area of a rhombus from its diagonals."""
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    """Area of a regular polygon: (n * s^2) / (4 * tan(pi / n))."""
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or equal to three as number of sides")
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))
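
# Quick sanity check (added for illustration): a regular hexagon with side 2
# has area (6 * 4) / (4 * tan(pi / 6)) = 24 / (4 / sqrt(3)) = 6 * sqrt(3) ≈ 10.39.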
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f"""Rectangle: {area_rectangle(10, 20) = }""")
print(f"""Square: {area_square(10) = }""")
print(f"""Triangle: {area_triangle(10, 10) = }""")
print(f"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
print(f"""Parallelogram: {area_parallelogram(10, 20) = }""")
print(f"""Rhombus: {area_rhombus(10, 20) = }""")
print(f"""Trapezium: {area_trapezium(10, 20, 30) = }""")
print(f"""Circle: {area_circle(20) = }""")
print(f"""Ellipse: {area_ellipse(10, 20) = }""")
print("\nSurface Areas of various geometric shapes: \n")
print(f"""Cube: {surface_area_cube(20) = }""")
print(f"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
print(f"""Sphere: {surface_area_sphere(20) = }""")
print(f"""Hemisphere: {surface_area_hemisphere(20) = }""")
print(f"""Cone: {surface_area_cone(10, 20) = }""")
print(f"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
print(f"""Cylinder: {surface_area_cylinder(10, 20) = }""")
print(f"""Torus: {surface_area_torus(20, 10) = }""")
print(f"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
print(f"""Square: {area_reg_polygon(4, 10) = }""")
print(f"""Reqular Pentagon: {area_reg_polygon(5, 10) = }""")
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)
    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True, )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, torchscript=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, fp16=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], fp16=True, multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID], training=True, inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False, )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())
    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID], training=True, inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, multi_process=False, )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 282
| 0
|
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """A map of modules from TF to PyTorch."""
    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
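
# Editor's sketch (illustrative only, not part of the original module): the kernel
# layout conversions performed by the loader above, shown on dummy arrays. TF stores
# regular conv kernels as (H, W, in, out) and depthwise kernels as (H, W, in,
# multiplier), while PyTorch expects (out, in, H, W) and (in, multiplier, H, W).
def _demo_kernel_transpose():
    import numpy as np

    w_tf = np.zeros((3, 3, 32, 64))  # H, W, in, out
    assert np.transpose(w_tf, (3, 2, 0, 1)).shape == (64, 32, 3, 3)

    w_tf_dw = np.zeros((3, 3, 32, 1))  # H, W, in, multiplier
    assert np.transpose(w_tf_dw, (2, 3, 0, 1)).shape == (32, 1, 3, 3)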
def apply_tf_padding(features, conv_layer) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
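
# Editor's worked example (a sketch, not from the original file) of the TF "SAME"
# padding rule implemented above: for in_height = 7, stride_height = 2 and
# kernel_height = 3, 7 % 2 != 0, so pad_along_height = max(3 - (7 % 2), 0) = 2,
# giving pad_top = 1 and pad_bottom = 1; the padded height is 9 and the convolution
# output height is (9 - 3) // 2 + 1 = 4 = ceil(7 / 2), matching TF's behavior.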
class MobileNetV1ConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetV1Config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        groups: int = 1,
        bias: bool = False,
        use_normalization: bool = True,
        use_activation: Union[bool, str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetV1PreTrainedModel(PreTrainedModel):
    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module) -> None:
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetV1ConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )

            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()
    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetV1Model(config)

        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
| 241
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
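
# Editor's sketch (a simplified illustration, not the real _LazyModule): the same
# deferred-import effect can be approximated with a PEP 562 module-level __getattr__:
#
#   import importlib
#
#   def __getattr__(name):
#       for module_name, names in _import_structure.items():
#           if name in names:
#               module = importlib.import_module("." + module_name, __name__)
#               return getattr(module, name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")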
| 191
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        init_image = init_image.resize((512, 512))

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = "A robot, 4k photo"

        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt,
            image=init_image,
            strength=0.85,
            generator=generator,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            height=512,
            width=512,
            strength=0.5,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
| 208
|
def combination_util(arr, n, r, index, data, i):
    # Current combination is ready to be printed
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
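# Editor's sketch: the standard library produces the same output;
# itertools.combinations yields r-length tuples in the same lexicographic
# order as the recursion above.
if __name__ == "__main__":
    from itertools import combinations

    for combo in combinations([10, 20, 30, 40, 50], 3):
        print(*combo)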
| 208
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
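

if __name__ == "__main__":
    # Editor's usage sketch: the entity-specific defaults of the configuration above.
    config = LukeConfig()
    print(config.entity_vocab_size, config.entity_emb_size)  # 500000 256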
| 249
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]
    def __init__(self, **kwargs):
        """
        This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
        deleted
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )
    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
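

# Editor's usage sketch (assumes a working transformers installation with torch):
#
#   from transformers import PyTorchBenchmark
#
#   args = PyTorchBenchmarkArguments(models=["sshleifer/tiny-gpt2"], batch_sizes=[1], sequence_lengths=[8])
#   results = PyTorchBenchmark(args).run()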
| 249
| 1
|
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """Perform a DFS-based topological sort of the directed acyclic graph above."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
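
# Editor's sketch: an iterative alternative (Kahn's algorithm). Unlike the DFS
# above, which appends a vertex only after its neighbors, Kahn's algorithm emits
# a vertex before everything it points to (here: ['a', 'c', 'b', 'd', 'e']).
def kahn_topological_sort(vertices, edges):
    from collections import deque

    indegree = {v: 0 for v in vertices}
    for src in edges:
        for dst in edges[src]:
            indegree[dst] += 1
    queue = deque(v for v in vertices if indegree[v] == 0)
    order = []
    while queue:
        vertex = queue.popleft()
        order.append(vertex)
        for dst in edges[vertex]:
            indegree[dst] -= 1
            if indegree[dst] == 0:
                queue.append(dst)
    return order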
| 162
|
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main():
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key, message):
    return translate_message(key, message, "encrypt")


def decrypt_message(key, message):
    return translate_message(key, message, "decrypt")


def translate_message(key, message, mode):
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
main()
| 162
| 1
|
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of `number`, rounded to `digit_amount` digits when positive."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
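    # Editor's sketch: the standard library offers the same split in one call;
    # math.modf returns (fractional, integral) parts with the sign of the input.
    import math

    fractional, integral = math.modf(-14.789)
    print(round(fractional, 3), integral)  # -0.789 -14.0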
| 195
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BICUBIC,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_convert_rgb=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        do_convert_rgb=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
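

# Editor's sketch (illustrative only): the same rescale->normalize->transpose chain
# written out with plain numpy, using the published CLIP normalization constants
# (these match OPENAI_CLIP_MEAN / OPENAI_CLIP_STD).
def _demo_preprocess_chain():
    import numpy as np

    image = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)  # H, W, C
    mean = np.array([0.48145466, 0.4578275, 0.40821073])
    std = np.array([0.26862954, 0.26130258, 0.27577711])

    pixels = image.astype(np.float32) * (1 / 255)  # rescale
    pixels = (pixels - mean) / std  # normalize per channel
    pixels = np.transpose(pixels, (2, 0, 1))  # HWC -> CHW (ChannelDimension.FIRST)
    return pixels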
| 195
| 1
|
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    """
    Configuration class for an Informer model; mirrors the time-series transformer configuration with
    Informer-specific `attention_type`, `sampling_factor` and `distil` options.
    """

    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        # Informer arguments
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
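

# Editor's worked example of the default embedding_dimension heuristic above,
# min(50, (cardinality + 1) // 2):
#
#   cardinality 4   -> (4 + 1) // 2 = 2
#   cardinality 7   -> (7 + 1) // 2 = 4
#   cardinality 120 -> min(50, 60) = 50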
| 352
|
"""simple docstring"""
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(
    df: "pyspark.sql.DataFrame",
    partition_order: List[int],
):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        partition_order=None,
    ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )
    def _validate_cache_dir(self):
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(
        self,
        fpath: str,
        file_format: str,
        max_shard_size: int,
    ):
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(
        self,
        split_generator: "datasets.SplitGenerator",
        file_format: str = "arrow",
        max_shard_size: Optional[Union[str, int]] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)

        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id: int,
                shard_id: int,
                global_shard_id: int,
            ):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )
    def _get_examples_iterable_for_split(
        self,
        split_generator: "datasets.SplitGenerator",
    ) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
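

# Editor's usage sketch (assumes pyspark and a local Spark session; Dataset.from_spark
# is the public datasets entry point that drives this builder):
#
#   from pyspark.sql import SparkSession
#   from datasets import Dataset
#
#   spark = SparkSession.builder.master("local[2]").getOrCreate()
#   df = spark.createDataFrame([("hello", 0), ("world", 1)], ["text", "label"])
#   ds = Dataset.from_spark(df)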
| 149
| 0
|
"""simple docstring"""
import argparse
import struct
import unittest
class SHA256:
    """Class for the SHA-256 hashing algorithm."""

    def __init__(self, data):
        self.data = data
        # Initialize hash values
        self.hashes = [
0X6A09E667,
0XBB67AE85,
0X3C6EF372,
0XA54FF53A,
0X510E527F,
0X9B05688C,
0X1F83D9AB,
0X5BE0CD19,
]
# Initialize round constants
        self.round_constants = [
0X428A2F98,
0X71374491,
0XB5C0FBCF,
0XE9B5DBA5,
0X3956C25B,
0X59F111F1,
0X923F82A4,
0XAB1C5ED5,
0XD807AA98,
0X12835B01,
0X243185BE,
0X550C7DC3,
0X72BE5D74,
0X80DEB1FE,
0X9BDC06A7,
0XC19BF174,
0XE49B69C1,
0XEFBE4786,
0X0FC19DC6,
0X240CA1CC,
0X2DE92C6F,
0X4A7484AA,
0X5CB0A9DC,
0X76F988DA,
0X983E5152,
0XA831C66D,
0XB00327C8,
0XBF597FC7,
0XC6E00BF3,
0XD5A79147,
0X06CA6351,
0X14292967,
0X27B70A85,
0X2E1B2138,
0X4D2C6DFC,
0X53380D13,
0X650A7354,
0X766A0ABB,
0X81C2C92E,
0X92722C85,
0XA2BFE8A1,
0XA81A664B,
0XC24B8B70,
0XC76C51A3,
0XD192E819,
0XD6990624,
0XF40E3585,
0X106AA070,
0X19A4C116,
0X1E376C08,
0X2748774C,
0X34B0BCB5,
0X391C0CB3,
0X4ED8AA4A,
0X5B9CCA4F,
0X682E6FF3,
0X748F82EE,
0X78A5636F,
0X84C87814,
0X8CC70208,
0X90BEFFFA,
0XA4506CEB,
0XBEF9A3F7,
0XC67178F2,
]
        self.preprocessed_data = self.preprocessing(self.data)
self.final_hash()
@staticmethod
    def preprocessing(data):
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        """Run the compression function over all 64-byte blocks and set self.hash."""
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4-byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000
                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit integer `value` by `rotations` bits."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)


class SHA256HashTest(unittest.TestCase):
    """Test class for the SHA256 class, inherits the TestCase class from unittest."""

    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    """
    Provides the option to hash a string (-s) or the contents of a file (-f)
    and prints the calculated SHA-256 digest.
    """
    # unittest.main() clashes with the script's own CLI arguments, so run doctests here
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()

    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")

    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
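
# Known-answer sanity check (my addition, not part of the original script):
# the classic FIPS 180-4 "abc" test vector, runnable alongside the unit test above.
if __name__ == "__main__":
    assert (
        SHA256(b"abc").hash
        == "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"
    )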
| 96
|
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 282
| 0
|
'''simple docstring'''
def is_palindrome(num: int) -> bool:
    """
    Return True if `num` equals its own digit reversal.

    >>> is_palindrome(121)
    True
    >>> is_palindrome(-121)
    False
    >>> is_palindrome(123)
    False
    """
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 370
|
'''simple docstring'''
import socket
def main() -> None:
    """Receive a file from the server and save it as `Received_file`."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312
    sock.connect((host, port))
    sock.send(b"Hello server!")
    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)
    print("Successfully received the file")
    sock.close()
    print("Connection closed")
if __name__ == "__main__":
main()
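
# For context, a minimal counterpart server (my sketch, not part of the original;
# it reuses this module's `socket` import and assumes a local `File_to_send`):
def serve() -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), 12312))  # same host/port the client above uses
    server.listen(1)
    conn, addr = server.accept()
    print(f"Connected to {addr}")
    conn.recv(1024)  # consume the client's greeting
    with open("File_to_send", "rb") as in_file:
        while chunk := in_file.read(1024):
            conn.send(chunk)
    conn.close()
    server.close()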
| 275
| 0
|
'''simple docstring'''
class SubArray:
    """Maximum contiguous sub-array sum over a comma-separated string of numbers."""

    def __init__(self, arr: str) -> None:
        # we need a list, not a string, so split on commas
        self.array = arr.split(",")

    def solve_sub_array(self) -> int:
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(
                int(self.array[i]) + sum_value[i - 1], int(self.array[i])
            )
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print("the result is:", re)
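
# Worked example (my addition, values chosen for illustration):
# for "1,2,-8,4,5" the best contiguous sum is 4 + 5 = 9.
assert SubArray("1,2,-8,4,5").solve_sub_array() == 9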
| 208
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0


class LDMSuperResolutionPipeline(DiffusionPipeline):
    r"""A pipeline for image super-resolution using latent diffusion."""

    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 208
| 1
|
from __future__ import annotations
import unittest
from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )
class TFDebertaV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2Model(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaV2Model,
            "fill-mask": TFDebertaV2ForMaskedLM,
            "question-answering": TFDebertaV2ForQuestionAnswering,
            "text-classification": TFDebertaV2ForSequenceClassification,
            "token-classification": TFDebertaV2ForTokenClassification,
            "zero-shot": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)


@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
| 359
|
from math import ceil, sqrt
def solution(limit: int = 1_000_000) -> int:
    """
    Count the square laminae that can be formed using up to `limit` tiles
    (Project Euler problem 173). A lamina of outer width w and hole width h
    uses w**2 - h**2 tiles, where w and h share the same parity.
    """
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
if __name__ == "__main__":
print(f'''{solution() = }''')
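
# Sanity check (my addition): a brute-force count should agree with solution()
# for small limits, e.g. solution_bruteforce(1_000) == solution(1_000).
def solution_bruteforce(limit: int) -> int:
    count = 0
    for outer in range(3, limit // 4 + 2):
        # holes share the parity of the outer square and leave a border >= 1 tile
        for hole in range(outer - 2, 0, -2):
            if outer**2 - hole**2 > limit:
                break
            count += 1
    return count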
| 121
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowerCamelCase = {
'''configuration_efficientformer''': [
'''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientFormerConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ['''EfficientFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 162
|
'''simple docstring'''
import math
import unittest
def is_prime(number: int) -> bool:
    """Checks to see if a number is a prime in O(sqrt(n)) time."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class Test(unittest.TestCase):
    def test_primes(self) -> None:
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
    def test_not_primes(self) -> None:
        with self.assertRaises(AssertionError):
            is_prime(-19)
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 162
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase :Optional[int] = logging.get_logger(__name__)
lowerCAmelCase :Any = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
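

# Usage sketch (my addition; the width multiplier is illustrative, and the
# import path assumes an installed `transformers`, not this in-tree module):
#   from transformers import MobileNetV2Config
#   config = MobileNetV2Config(depth_multiplier=1.4, image_size=224)
#   assert config.hidden_act == "relu6" and config.model_type == "mobilenet_v2"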
| 367
|
'''simple docstring'''
def is_automorphic_number(number: int) -> bool:
    """
    Return True if `number`'s square ends in `number` itself (an automorphic number).

    >>> is_automorphic_number(76)
    True
    >>> is_automorphic_number(8)
    False
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 275
| 0
|
def match_pattern(input_string: str, pattern: str) -> bool:
    """
    Regex-style matching where '.' matches any single character and '*' matches
    zero or more of the preceding element.

    >>> match_pattern("aab", "c*a*b")
    True
    >>> match_pattern("aaa", "aa")
    False
    >>> match_pattern("aaa", "a.a")
    True
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''')
| 123
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: "tf.Session"):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
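
# Typical invocation (my illustration; the script name and paths are placeholders):
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_checkpoints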
| 149
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt],
            width=768,
            height=512,
            generator=generator,
            guidance_scale=7.5,
            sag_scale=1.0,
            num_inference_steps=20,
            output_type="np",
        )
        image = output.images

        assert image.shape == (1, 512, 768, 3)
| 371
|
from math import factorial
class Dual:
    """A dual number: a real part plus a list of dual ('epsilon') coefficients."""

    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    """
    Differentiate `func` at `position` to the given `order` using dual numbers
    (forward-mode automatic differentiation).
    """
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
| 207
| 0
|
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
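

# Illustrative only (my sketch, not part of the library): a concrete input
# stream just stores its source and implements read(). `DictDatasetInputStream`
# is a hypothetical name.
class DictDatasetInputStream(AbstractDatasetInputStream):
    def __init__(self, data: dict, **kwargs):
        super().__init__(**kwargs)
        self.data = data

    def read(self) -> Dataset:
        return Dataset.from_dict(self.data, features=self.features)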
| 324
|
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs

    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 275
| 0
|
"""simple docstring"""
def merge_sort(collection: list) -> list:
    """Sort a list by repeatedly moving its minimum to the front and its maximum to the back."""
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
if __name__ == "__main__":
__snake_case = input('''Enter numbers separated by a comma:\n''').strip()
__snake_case = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
| 369
|
"""simple docstring"""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """Convert a TensorFlow BERT checkpoint to a PyTorch state dict."""
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__snake_case = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 153
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCamelCase : Union[str, Any] = {
"configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
"tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : int = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : int = [
"DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"DebertaForMaskedLM",
"DebertaForQuestionAnswering",
"DebertaForSequenceClassification",
"DebertaForTokenClassification",
"DebertaModel",
"DebertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Tuple = [
"TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDebertaForMaskedLM",
"TFDebertaForQuestionAnswering",
"TFDebertaForSequenceClassification",
"TFDebertaForTokenClassification",
"TFDebertaModel",
"TFDebertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28
|
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    """Probabilistic Miller-Rabin primality test with `prec` random rounds."""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd: write n - 1 = d * (2**exp) with d odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
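# For reference, `bin_exp_mod` (imported above from a sibling module not shown
# here) is assumed to compute (a ** n) % b by binary exponentiation. A minimal
# sketch of that helper:
#
#     def bin_exp_mod(a, n, b):
#         if n == 0:
#             return 1
#         if n % 2 == 1:
#             return (bin_exp_mod(a, n - 1, b) * a) % b
#         r = bin_exp_mod(a, n // 2, b)
#         return (r * r) % b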
if __name__ == "__main__":
    bound = abs(int(input('Enter bound : ').strip()))
    print('Here\'s the list of primes:')
    print(', '.join(str(i) for i in range(bound + 1) if is_prime_big(i)))
| 121
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    """Text model output that also exposes a projection of the hidden states."""

    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2, project_dim=512, pooler_fn="cls", learn_encoder=False, use_attention_mask=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=True if self.has_pre_transformation else output_hidden_states, return_dict=return_dict, )
        if self.has_pre_transformation:
            # project the penultimate hidden state instead of the final one
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
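# Minimal usage sketch (the tensors `input_ids` and `attention_mask` are
# placeholders for tokenizer output, not values defined in this file):
#
#     config = RobertaSeriesConfig(project_dim=768)
#     model = RobertaSeriesModelWithTransformation(config)
#     out = model(input_ids=input_ids, attention_mask=attention_mask)
#     text_embeddings = out.projection_state  # (batch, seq_len, project_dim)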
| 370
|
"""simple docstring"""
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]
    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False)
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
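    # Example invocation (all paths below are hypothetical placeholders):
    #   python convert_s3prl_checkpoint.py --base_model_name facebook/wav2vec2-base \
    #       --config_path ./config.json --checkpoint_path ./s3prl_checkpoint.ckpt \
    #       --model_dump_path ./converted_model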
| 268
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class ImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values']

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}')
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
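# Minimal usage sketch (`image` is a placeholder for any PIL image):
#
#     processor = ImageProcessor()
#     batch = processor.preprocess(image, return_tensors="np")
#     batch["pixel_values"][0].shape  # (3, 224, 224) after resize + center crop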
| 46
|
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Newton-Laplace formula: c = sqrt(K / rho) for bulk modulus K and density rho."""
    if density <= 0:
        raise ValueError('''Impossible fluid density''')
    if bulk_modulus <= 0:
        raise ValueError('''Impossible bulk modulus''')
    return (bulk_modulus / density) ** 0.5
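# Worked example (approximate handbook values for water at room temperature):
# with bulk modulus K ~ 2.15e9 Pa and density rho ~ 998 kg/m^3,
# speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9) ~ 1468 m/s,
# close to the measured ~1480 m/s.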
if __name__ == "__main__":
import doctest
doctest.testmod()
| 275
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 227
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            'num_attention_heads': 2,
            'attention_head_dim': 12,
            'embedding_dim': self.text_embedder_hidden_size,
            'num_layers': 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=224, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14, )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, )
        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type='fixed_small_log', prediction_type='sample', num_train_timesteps=1000, clip_sample=True, clip_sample_range=10.0, )
        components = {
            'prior': prior,
            'image_encoder': image_encoder,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'scheduler': scheduler,
            'image_processor': image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'horse',
            'generator': generator,
            'guidance_scale': 4.0,
            'num_inference_steps': 2,
            'output_type': 'np',
        }
        return inputs

    def test_kandinsky_prior(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == 'cpu'
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == 'cpu'
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, )
| 227
| 1
|
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
| 230
|
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(self, parent, batch_size=2, image_size=32, patch_size=16, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, backbone_out_indices=[0, 1, 2, 3], num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, backbone_featmap_shape=[1, 384, 24, 24], is_hybrid=True, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            '''global_padding''': '''same''',
            '''layer_type''': '''bottleneck''',
            '''depths''': [3, 4, 9],
            '''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
            '''embedding_dynamic_padding''': True,
            '''hidden_sizes''': [96, 192, 384, 768],
            '''num_groups''': 2,
        }
        return DPTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, backbone_out_indices=self.backbone_out_indices, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, is_hybrid=self.is_hybrid, backbone_config=backbone_config, backbone_featmap_shape=self.backbone_featmap_shape, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict


@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''DPT does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"""{name}.{key}""" for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"""Parameter {name} of model {model_class} seems not properly initialized""", )

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = '''add'''
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)


def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image


@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''')
        model = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''').to(torch_device)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth
        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1E-4))
| 207
| 0
|
import argparse
import datetime
def zeller(date_input: str) -> str:
    """Return the day of the week for a date given as 'mm-dd-yyyy' or 'mm/dd/yyyy'."""
    days = {
        """0""": """Sunday""",
        """1""": """Monday""",
        """2""": """Tuesday""",
        """3""": """Wednesday""",
        """4""": """Thursday""",
        """5""": """Friday""",
        """6""": """Saturday""",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("""Must be 10 characters long""")
    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("""Month must be between 1 - 12""")
    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("""Date separator must be '-' or '/'""")
    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("""Date must be between 1 - 31""")
    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("""Date separator must be '-' or '/'""")
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8_500:
        raise ValueError(
            """Year out of range. There has to be some sort of limit...right?""")
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("""The date was evaluated incorrectly. Contact developer.""")
    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
description=(
'''Find out what day of the week nearly any date is or was. Enter '''
'''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
)
)
parser.add_argument(
'''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
)
    args = parser.parse_args()
    zeller(args.date_input)
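    # Worked example: zeller("01-31-2010") reports a Sunday, which matches the
    # calendar (January 31, 2010 fell on a Sunday).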
| 178
|
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Fallback stub so the annotations below don't fail without vision extras."""

        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class __A( unittest.TestCase ):
"""simple docstring"""
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else []))
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []))

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass
@require_tf
@unittest.skip("""Image segmentation not implemented in TF""" )
def UpperCAmelCase_ (self ):
pass
@slow
@require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("""mask-generation""", model="""facebook/sam-vit-huge""")
        outputs = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""", points_per_batch=2_56)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["""masks"""]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (4_80, 6_40)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (4_80, 6_40)}, """scores""": 1.021},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (4_80, 6_40)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (4_80, 6_40)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (4_80, 6_40)}, """scores""": 1.0053},
{"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (4_80, 6_40)}, """scores""": 0.9967},
{"""mask""": {"""hash""": """453c7844bd""", """shape""": (4_80, 6_40)}, """scores""": 0.993},
{"""mask""": {"""hash""": """3d44f2926d""", """shape""": (4_80, 6_40)}, """scores""": 0.9909},
{"""mask""": {"""hash""": """64033ddc3f""", """shape""": (4_80, 6_40)}, """scores""": 0.9879},
{"""mask""": {"""hash""": """801064ff79""", """shape""": (4_80, 6_40)}, """scores""": 0.9834},
{"""mask""": {"""hash""": """6172f276ef""", """shape""": (4_80, 6_40)}, """scores""": 0.9716},
{"""mask""": {"""hash""": """b49e60e084""", """shape""": (4_80, 6_40)}, """scores""": 0.9612},
{"""mask""": {"""hash""": """a811e775fd""", """shape""": (4_80, 6_40)}, """scores""": 0.9599},
{"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (4_80, 6_40)}, """scores""": 0.9552},
{"""mask""": {"""hash""": """9d8257e080""", """shape""": (4_80, 6_40)}, """scores""": 0.9532},
{"""mask""": {"""hash""": """32de6454a8""", """shape""": (4_80, 6_40)}, """scores""": 0.9516},
{"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (4_80, 6_40)}, """scores""": 0.9499},
{"""mask""": {"""hash""": """3c6db475fb""", """shape""": (4_80, 6_40)}, """scores""": 0.9483},
{"""mask""": {"""hash""": """c290813fb9""", """shape""": (4_80, 6_40)}, """scores""": 0.9464},
{"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (4_80, 6_40)}, """scores""": 0.943},
{"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (4_80, 6_40)}, """scores""": 0.943},
{"""mask""": {"""hash""": """c749b25868""", """shape""": (4_80, 6_40)}, """scores""": 0.9408},
{"""mask""": {"""hash""": """efb6cab859""", """shape""": (4_80, 6_40)}, """scores""": 0.9335},
{"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (4_80, 6_40)}, """scores""": 0.9326},
{"""mask""": {"""hash""": """788b798e24""", """shape""": (4_80, 6_40)}, """scores""": 0.9262},
{"""mask""": {"""hash""": """abea804f0e""", """shape""": (4_80, 6_40)}, """scores""": 0.8999},
{"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (4_80, 6_40)}, """scores""": 0.8986},
{"""mask""": {"""hash""": """cd24047c8a""", """shape""": (4_80, 6_40)}, """scores""": 0.8984},
{"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (4_80, 6_40)}, """scores""": 0.8873},
{"""mask""": {"""hash""": """b5f47c9191""", """shape""": (4_80, 6_40)}, """scores""": 0.8871}
] , )
# fmt: on
@require_torch
@slow
    def test_threshold(self):
        model_id = """facebook/sam-vit-huge"""
        image_segmenter = pipeline("""mask-generation""", model=model_id)
        outputs = image_segmenter(
            """http://images.cocodataset.org/val2017/000000039769.jpg""", pred_iou_thresh=1, points_per_batch=2_56)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["""masks"""]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (4_80, 6_40)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (4_80, 6_40)}, """scores""": 1.0210},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (4_80, 6_40)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (4_80, 6_40)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (4_80, 6_40)}, """scores""": 1.0053},
] , )
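    # Note: `points_per_batch` only controls how many sampled prompt points are
    # run through the model at once (a memory/speed trade-off); it does not
    # change which masks are produced, which is why the hashed outputs above
    # are stable across batch sizes.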
| 178
| 1
|
"""simple docstring"""
import functools
def mincost_tickets(days: list, costs: list) -> int:
    """Minimum cost to cover all travel days with 1-, 7- and 30-day passes."""
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("""The parameter days should be a list of integers""")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("""The parameter costs should be a list of three integers""")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("""All days elements should be greater than 0""")
    if max(days) >= 3_6_6:
        raise ValueError("""All days elements should be less than 366""")
    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 3_6_5:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1), costs[1] + dynamic_programming(index + 7), costs[2] + dynamic_programming(index + 3_0), )

    return dynamic_programming(1)
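# Usage example (a classic instance of this problem): travelling on days
# [1, 4, 6, 7, 8, 20] with pass costs [2, 7, 15] for 1-, 7- and 30-day passes
# gives mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11
# (a 1-day pass on day 1, a 7-day pass covering days 4-8, a 1-day pass on day 20).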
if __name__ == "__main__":
import doctest
doctest.testmod()
| 84
|
"""simple docstring"""
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set:
    """Iterative depth-first search; returns the set of vertices reachable from start."""
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
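# This runs in O(V + E): each vertex is pushed at most once per incoming edge and
# each adjacency list is scanned once. Reversing the adjacency list keeps the
# visit order identical to the recursive formulation on the sample graph below.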
G = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
| 153
| 0
|
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.0_0_9
def _error(example_no, data_set="train"):
    """Difference between predicted and actual output for one example."""
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Evaluate the linear hypothesis h(x) = theta_0 + sum_i theta_i * x_i."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.00_0002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector, temp_parameter_vector, atol=absolute_error_limit, rtol=relative_error_limit, ):
            break
        parameter_vector = temp_parameter_vector
    print(('''Number of iterations:''', j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(('''Actual output value:''', output(i, '''test''')))
        print(('''Hypothesis output:''', calculate_hypothesis_value(i, '''test''')))
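# The loop above implements batch gradient descent: every parameter is updated
# simultaneously as
#     theta_i := theta_i - LEARNING_RATE * (1/m) * sum_j error(j) * x_i(j)
# where x_0(j) is an implicit bias feature equal to 1 (hence the `i - 1` index,
# which maps parameter 0 to the bias term via index == -1 above).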
if __name__ == "__main__":
run_gradient_descent()
print("""\nTesting gradient descent for a linear hypothesis function.\n""")
test_gradient_descent()
| 330
|
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, """w""") as fp:
fp.write("""\n""".join(merges))
    tokenizer = FSMTTokenizer(
langs=["""en""", """ru"""],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=["""ru""", """en"""],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(F"num of params {tiny_model.num_parameters()}")
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 330
| 1
|
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class AutoformerConfig(PretrainedConfig):
    """Configuration class for the Autoformer time-series model."""

    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(self, prediction_length: Optional[int] = None, context_length: Optional[int] = None, distribution_output: str = "student_t", loss: str = "nll", input_size: int = 1, lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7], scaling: bool = True, num_time_features: int = 0, num_dynamic_real_features: int = 0, num_static_categorical_features: int = 0, num_static_real_features: int = 0, cardinality: Optional[List[int]] = None, embedding_dimension: Optional[List[int]] = None, d_model: int = 64, encoder_attention_heads: int = 2, decoder_attention_heads: int = 2, encoder_layers: int = 2, decoder_layers: int = 2, encoder_ffn_dim: int = 32, decoder_ffn_dim: int = 32, activation_function: str = "gelu", dropout: float = 0.1, encoder_layerdrop: float = 0.1, decoder_layerdrop: float = 0.1, attention_dropout: float = 0.1, activation_dropout: float = 0.1, num_parallel_samples: int = 100, init_std: float = 0.02, use_cache: bool = True, is_encoder_decoder=True, label_length: int = 10, moving_average: int = 25, autocorrelation_factor: int = 3, **kwargs, ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    """The cardinality should be a list of the same length as `num_static_categorical_features`""")
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    """The embedding dimension should be a list of the same length as `num_static_categorical_features`""")
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
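# Minimal usage sketch (the numbers are illustrative, not a recommended setup):
#
#     config = AutoformerConfig(prediction_length=24, context_length=48, num_time_features=2)
#     # feature_size is derived, not passed in:
#     # config.feature_size == config.input_size * len(config.lags_sequence) + config._number_of_features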
| 35
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    config = SwinConfig(image_size=1_92)
    if "base" in model_name:
        window_size = 6
        embed_dim = 1_28
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 1_92
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"
    if "decoder" in name:
        pass
    else:
        name = "swin." + name
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            # The original checkpoint stores query/key/value as one fused matrix;
            # split it into the three projections HuggingFace Swin expects.
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[
                    :dim
                ]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[
                    dim : dim * 2
                ]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[
                    -dim:
                ]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = ViTImageProcessor(size={"height": 1_92, "width": 1_92})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs).logits
    print(outputs.keys())
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        print(F"""Saving image processor to {pytorch_dump_folder_path}""")
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(F"""Pushing model and image processor for {model_name} to hub""")
        model.push_to_hub(F"""microsoft/{model_name}""")
        image_processor.push_to_hub(F"""microsoft/{model_name}""")
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''swin-base-simmim-window6-192''',
type=str,
choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''],
help='''Name of the Swin SimMIM model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''',
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCamelCase_ = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
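A hedged usage sketch of the converted checkpoint follows; it assumes the script above was run with --push_to_hub so the weights live at microsoft/swin-base-simmim-window6-192 (otherwise point from_pretrained at the local dump folder).

import requests
import torch
from PIL import Image
from transformers import SwinForMaskedImageModeling, ViTImageProcessor

image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
model = SwinForMaskedImageModeling.from_pretrained("microsoft/swin-base-simmim-window6-192")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.keys())  # same sanity check as in the conversion script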
| 268
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    """Extracts log-mel filter-bank features and optionally applies
    utterance-level cepstral mean and variance normalization (CMVN)."""

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: Optional[bool] = True,
        normalize_vars: Optional[bool] = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        # Mean/variance statistics are computed over the non-padded frames only.
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})
        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
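The normalization step above is plain utterance-level CMVN; a minimal standalone sketch with numpy only (the cmvn helper is hypothetical, not part of the class):

import numpy as np

def cmvn(x: np.ndarray, input_length: int) -> np.ndarray:
    # Statistics come from the non-padded frames only, mirroring utterance_cmvn.
    mean = x[:input_length].mean(axis=0)
    std = x[:input_length].std(axis=0)
    return ((x - mean) / std).astype(np.float32)

features = np.random.randn(100, 80).astype(np.float32)  # (frames, mel bins)
normalized = cmvn(features, input_length=100)
print(normalized.mean(axis=0)[:3], normalized.std(axis=0)[:3])  # ~0 and ~1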
| 157
|
"""simple docstring"""
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/diffusers"
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def _lowerCAmelCase ( UpperCAmelCase : Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__ : str =_re_indent.search(UpperCAmelCase )
return "" if search is None else search.groups()[0]
def _lowerCAmelCase ( UpperCAmelCase : int , UpperCAmelCase : Union[str, Any]="" , UpperCAmelCase : List[Any]=None , UpperCAmelCase : Tuple=None ):
'''simple docstring'''
UpperCamelCase__ : int =0
UpperCamelCase__ : Union[str, Any] =code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(UpperCAmelCase ):
index += 1
UpperCamelCase__ : Optional[int] =['''\n'''.join(lines[:index] )]
else:
UpperCamelCase__ : List[Any] =[]
# We split into blocks until we get to the `end_prompt` (or the end of the block).
UpperCamelCase__ : Dict =[lines[index]]
index += 1
while index < len(UpperCAmelCase ) and (end_prompt is None or not lines[index].startswith(UpperCAmelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(UpperCAmelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(UpperCAmelCase ) )
if index < len(UpperCAmelCase ) - 1:
UpperCamelCase__ : Optional[Any] =[lines[index + 1]]
index += 1
else:
UpperCamelCase__ : List[str] =[]
else:
blocks.append('''\n'''.join(UpperCAmelCase ) )
UpperCamelCase__ : List[Any] =[lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(UpperCAmelCase ) > 0:
blocks.append('''\n'''.join(UpperCAmelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(UpperCAmelCase ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def _lowerCAmelCase ( UpperCAmelCase : str ):
'''simple docstring'''
def _inner(UpperCAmelCase : Dict ):
return key(UpperCAmelCase ).lower().replace('''_''' , '''''' )
return _inner
def _lowerCAmelCase ( UpperCAmelCase : int , UpperCAmelCase : Dict=None ):
'''simple docstring'''
def noop(UpperCAmelCase : Optional[Any] ):
return x
if key is None:
UpperCamelCase__ : int =noop
# Constants are all uppercase, they go first.
UpperCamelCase__ : List[str] =[obj for obj in objects if key(UpperCAmelCase ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
UpperCamelCase__ : Dict =[obj for obj in objects if key(UpperCAmelCase )[0].isupper() and not key(UpperCAmelCase ).isupper()]
# Functions begin with a lowercase, they go last.
UpperCamelCase__ : int =[obj for obj in objects if not key(UpperCAmelCase )[0].isupper()]
UpperCamelCase__ : Optional[int] =ignore_underscore(UpperCAmelCase )
return sorted(UpperCAmelCase , key=UpperCAmelCase ) + sorted(UpperCAmelCase , key=UpperCAmelCase ) + sorted(UpperCAmelCase , key=UpperCAmelCase )
def _lowerCAmelCase ( UpperCAmelCase : Union[str, Any] ):
'''simple docstring'''
def _replace(UpperCAmelCase : Union[str, Any] ):
UpperCamelCase__ : List[str] =match.groups()[0]
if "," not in imports:
return F'''[{imports}]'''
UpperCamelCase__ : Optional[int] =[part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
UpperCamelCase__ : Tuple =keys[:-1]
return "[" + ", ".join([F'''"{k}"''' for k in sort_objects(UpperCAmelCase )] ) + "]"
UpperCamelCase__ : List[Any] =import_statement.split('''\n''' )
if len(UpperCAmelCase ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
UpperCamelCase__ : List[str] =2 if lines[1].strip() == '''[''' else 1
UpperCamelCase__ : List[str] =[(i, _re_strip_line.search(UpperCAmelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
UpperCamelCase__ : List[str] =sort_objects(UpperCAmelCase , key=lambda UpperCAmelCase : x[1] )
UpperCamelCase__ : Tuple =[lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(UpperCAmelCase ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
UpperCamelCase__ : Dict =_re_bracket_content.sub(_replace , lines[1] )
else:
UpperCamelCase__ : Optional[int] =[part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
UpperCamelCase__ : Tuple =keys[:-1]
UpperCamelCase__ : Optional[Any] =get_indent(lines[1] ) + ''', '''.join([F'''"{k}"''' for k in sort_objects(UpperCAmelCase )] )
return "\n".join(UpperCAmelCase )
else:
# Finally we have to deal with imports fitting on one line
UpperCamelCase__ : List[str] =_re_bracket_content.sub(_replace , UpperCAmelCase )
return import_statement
def _lowerCAmelCase ( UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any]=True ):
'''simple docstring'''
with open(UpperCAmelCase , '''r''' ) as f:
UpperCamelCase__ : int =f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
UpperCamelCase__ : Optional[int] =split_code_in_indented_blocks(
UpperCAmelCase , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(UpperCAmelCase ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
UpperCamelCase__ : Dict =main_blocks[block_idx]
UpperCamelCase__ : List[str] =block.split('''\n''' )
# Get to the start of the imports.
UpperCamelCase__ : str =0
while line_idx < len(UpperCAmelCase ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
UpperCamelCase__ : Optional[int] =len(UpperCAmelCase )
else:
line_idx += 1
if line_idx >= len(UpperCAmelCase ):
continue
# Ignore beginning and last line: they don't contain anything.
UpperCamelCase__ : Optional[Any] ='''\n'''.join(block_lines[line_idx:-1] )
UpperCamelCase__ : Tuple =get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
UpperCamelCase__ : str =split_code_in_indented_blocks(UpperCAmelCase , indent_level=UpperCAmelCase )
# We have two categories of import key: list or _import_structure[key].append/extend
UpperCamelCase__ : str =_re_direct_key if '''_import_structure''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
UpperCamelCase__ : Tuple =[(pattern.search(UpperCAmelCase ).groups()[0] if pattern.search(UpperCAmelCase ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
UpperCamelCase__ : List[Any] =[(i, key) for i, key in enumerate(UpperCAmelCase ) if key is not None]
UpperCamelCase__ : Optional[Any] =[x[0] for x in sorted(UpperCAmelCase , key=lambda UpperCAmelCase : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
UpperCamelCase__ : Union[str, Any] =0
UpperCamelCase__ : str =[]
for i in range(len(UpperCAmelCase ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
UpperCamelCase__ : Optional[Any] =sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(UpperCAmelCase )
count += 1
# And we put our main block back together with its first and last line.
UpperCamelCase__ : Optional[Any] ='''\n'''.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(UpperCAmelCase ):
if check_only:
return True
else:
print(F'''Overwriting {file}.''' )
with open(UpperCAmelCase , '''w''' ) as f:
f.write('''\n'''.join(UpperCAmelCase ) )
def _lowerCAmelCase ( UpperCAmelCase : Dict=True ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] =[]
for root, _, files in os.walk(UpperCAmelCase ):
if "__init__.py" in files:
UpperCamelCase__ : List[Any] =sort_imports(os.path.join(UpperCAmelCase , '''__init__.py''' ) , check_only=UpperCAmelCase )
if result:
UpperCamelCase__ : int =[os.path.join(UpperCAmelCase , '''__init__.py''' )]
if len(UpperCAmelCase ) > 0:
raise ValueError(F'''Would overwrite {len(UpperCAmelCase )} files, run `make style`.''' )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
_SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
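The ordering rule enforced by sort_objects above can be seen in a tiny standalone sketch (the names below are invented):

names = ["load_dataset", "DatasetDict", "DATASET_NAMES", "Dataset", "concatenate_datasets"]

def ignore_underscore_key(name: str) -> str:
    return name.lower().replace("_", "")

# Constants first, then classes, then functions, each group sorted
# case-insensitively with underscores ignored.
constants = sorted([n for n in names if n.isupper()], key=ignore_underscore_key)
classes = sorted([n for n in names if n[0].isupper() and not n.isupper()], key=ignore_underscore_key)
functions = sorted([n for n in names if not n[0].isupper()], key=ignore_underscore_key)
print(constants + classes + functions)
# ['DATASET_NAMES', 'Dataset', 'DatasetDict', 'concatenate_datasets', 'load_dataset']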
| 157
| 1
|
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class CustomLocalPipeline(DiffusionPipeline):
    """A minimal unconditional denoising pipeline; the extra "This is a local
    test" return value marks it as a local custom-pipeline fixture."""

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,), "This is a local test"
        return ImagePipelineOutput(images=image), "This is a local test"
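A hypothetical smoke test of the pipeline above; the randomly initialized UNet2DModel and the DDIMScheduler choice are assumptions, not part of the fixture.

import torch
from diffusers import DDIMScheduler, UNet2DModel

unet = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)  # tiny, untrained
pipe = CustomLocalPipeline(unet=unet, scheduler=DDIMScheduler())
output, marker = pipe(batch_size=1, num_inference_steps=5)
print(marker)                 # "This is a local test"
print(output.images[0].size)  # (32, 32) PIL image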
| 227
|
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    original_config = model.config
    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )
    return encoder_config, decoder_config
def a( A : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
if "encoder.model" in name:
a = name.replace("encoder.model" , "encoder" )
if "decoder.model" in name:
a = name.replace("decoder.model" , "decoder" )
if "patch_embed.proj" in name:
a = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
a = name.replace("patch_embed.norm" , "embeddings.norm" )
if name.startswith("encoder" ):
if "layers" in name:
a = "encoder." + name
if "attn.proj" in name:
a = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name and "mask" not in name:
a = name.replace("attn" , "attention.self" )
if "norm1" in name:
a = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
a = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
a = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
a = name.replace("mlp.fc2" , "output.dense" )
if name == "encoder.norm.weight":
a = "encoder.layernorm.weight"
if name == "encoder.norm.bias":
a = "encoder.layernorm.bias"
return name
def a( A : Union[str, Any] , A : Tuple ) -> List[Any]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
a = orig_state_dict.pop(A )
if "qkv" in key:
a = key.split("." )
a = int(key_split[3] )
a = int(key_split[5] )
a = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
a = val[:dim, :]
a = val[dim : dim * 2, :]
a = val[-dim:, :]
else:
a = val[:dim]
a = val[dim : dim * 2]
a = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
a = val
return orig_state_dict
def a( A : List[Any] , A : Tuple=None , A : List[Any]=False ) -> Optional[int]:
"""simple docstring"""
a = DonutModel.from_pretrained(A ).eval()
# load HuggingFace model
a , a = get_configs(A )
a = DonutSwinModel(A )
a = MBartForCausalLM(A )
a = VisionEncoderDecoderModel(encoder=A , decoder=A )
model.eval()
a = original_model.state_dict()
a = convert_state_dict(A , A )
model.load_state_dict(A )
# verify results on scanned document
a = load_dataset("hf-internal-testing/example-documents" )
a = dataset["test"][0]["image"].convert("RGB" )
a = XLMRobertaTokenizerFast.from_pretrained(A , from_slow=A )
a = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
a = DonutProcessor(A , A )
a = processor(A , return_tensors="pt" ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
a = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
a = "When is the coffee break?"
a = task_prompt.replace("{user_input}" , A )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
a = "<s_rvlcdip>"
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
a = "<s_cord>"
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
a = "s_cord-v2>"
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
a = "<s_zhtrainticket>"
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
a = "hello world"
else:
raise ValueError("Model name not supported" )
a = original_model.decoder.tokenizer(A , add_special_tokens=A , return_tensors="pt" )[
"input_ids"
]
a = original_model.encoder.model.patch_embed(A )
a , a = model.encoder.embeddings(A )
assert torch.allclose(A , A , atol=1e-3 )
# verify encoder hidden states
a = original_model.encoder(A )
a = model.encoder(A ).last_hidden_state
assert torch.allclose(A , A , atol=1e-2 )
# verify decoder hidden states
a = original_model(A , A , A ).logits
a = model(A , decoder_input_ids=A ).logits
assert torch.allclose(A , A , atol=1e-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(A )
processor.save_pretrained(A )
if push_to_hub:
model.push_to_hub("nielsr/" + model_name.split("/" )[-1] , commit_message="Update model" )
processor.push_to_hub("nielsr/" + model_name.split("/" )[-1] , commit_message="Update model" )
if __name__ == "__main__":
_lowercase: Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="naver-clova-ix/donut-base-finetuned-docvqa",
required=False,
type=str,
help="Name of the original model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
required=False,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub.",
)
_lowercase: Optional[Any] = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
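For reference, a hedged end-to-end DocVQA sketch with the upstream checkpoint this script converts; the generate arguments follow the usual Donut recipe and are assumptions.

import torch
from datasets import load_dataset
from transformers import DonutProcessor, VisionEncoderDecoderModel

processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa")
model = VisionEncoderDecoderModel.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa")

image = load_dataset("hf-internal-testing/example-documents", split="test")[0]["image"].convert("RGB")
task_prompt = "<s_docvqa><s_question>When is the coffee break?</s_question><s_answer>"
decoder_input_ids = processor.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt").input_ids
pixel_values = processor(image, return_tensors="pt").pixel_values

with torch.no_grad():
    outputs = model.generate(
        pixel_values,
        decoder_input_ids=decoder_input_ids,
        max_length=512,
        pad_token_id=processor.tokenizer.pad_token_id,
        eos_token_id=processor.tokenizer.eos_token_id,
    )
print(processor.batch_decode(outputs)[0])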
| 227
| 1
|
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """Pad every sequence to `sequence_length`; a tuple `padding_value` signals
    2-D entries (e.g. entity spans), a scalar signals 1-D labels."""
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        t = tensor[:sequence_length]
        if len(t) == 0:  # nothing to copy for empty sequences
            continue
        if padding_side == "right":
            out_tensor[i, : len(t)] = t
        else:
            out_tensor[i, -len(t) :] = t
    return out_tensor.tolist()
def is_punctuation(char: str) -> bool:
    cp = ord(char)
    # ASCII ranges that are not letters or digits count as punctuation,
    # plus anything Unicode classifies in a punctuation category ("P*").
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    """Pads token-classification features together with LUKE's entity-level
    inputs (labels, ner_tags and original_entity_spans)."""

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors is deferred until the labels are padded too.
            return_tensors="pt" if labels is None else None,
        )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]
        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
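A quick illustration of what padding_tensor produces (a sketch, not part of the collator):

print(padding_tensor([[1, 2, 3], [4]], -1, "right", 5))
# [[1, 2, 3, -1, -1], [4, -1, -1, -1, -1]]
print(padding_tensor([[(0, 1), (2, 3)], [(4, 5)]], (-1, -1), "right", 3))
# [[[0, 1], [2, 3], [-1, -1]], [[4, 5], [-1, -1], [-1, -1]]]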
| 358
|
"""simple docstring"""
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any(num: int, base: int) -> str:
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            # Digits above 9 map to letters via the ALPHABET_VALUES table.
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
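A few example conversions with the function above:

print(decimal_to_any(0, 2))     # '0'
print(decimal_to_any(5, 4))     # '11'
print(decimal_to_any(255, 16))  # 'FF'
print(decimal_to_any(36, 36))   # '10'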
| 255
| 0
|
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C
def carrier_concentration(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0) != 1:
raise ValueError('You cannot supply more or less than 2 values')
elif conductivity < 0:
raise ValueError('Conductivity cannot be negative')
elif electron_conc < 0:
raise ValueError('Electron concentration cannot be negative')
elif mobility < 0:
raise ValueError('mobility cannot be negative')
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
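An illustrative call (the numbers are made up): whichever of the three quantities is passed as 0 is solved for.

print(carrier_concentration(conductivity=25, electron_conc=0, mobility=100))
# ('electron_conc', ...)  # approximately 1.56e18, i.e. 25 / (100 * ELECTRON_CHARGE)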
| 178
|
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    # Binary search: smallest index in v[l+1..r] whose value is >= key.
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r
def longest_increasing_subsequence_length(v):
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # New smallest value: restart a length-1 candidate.
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest increasing subsequence found so far.
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces its ceiling element to keep candidate tails minimal.
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
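Example with the functions above:

print(longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]))  # 6
# e.g. the increasing subsequence 2, 3, 7, 8, 10, 13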
| 178
| 1
|
from sklearn.metrics import matthews_corrcoef
import datasets
__lowerCAmelCase = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
__lowerCAmelCase = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
__lowerCAmelCase = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
| 359
|
'''simple docstring'''
def longest_common_substring(text1: str, text2: str) -> str:
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")
    text1_length = len(text1)
    text2_length = len(text2)
    # dp[i][j] = length of the common suffix of text1[:i] and text2[:j]
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0
    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
    return text1[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
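Example with the function above:

print(longest_common_substring("abcdef", "xabded"))  # 'ab'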
| 270
| 0
|
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    # Difference between the hypothesis and the actual output for one example.
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)
def _hypothesis_value(data_input_tuple):
    # h(x) = p0 + p1*x1 + p2*x2 + p3*x3 for the current parameter_vector.
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None
def summation_of_cost_derivative(index, end=m):
    # Sum of errors over the training set; index == -1 selects the bias term.
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value
def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))
def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print("""\nTesting gradient descent for a linear hypothesis function.\n""")
test_gradient_descent()
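A sanity check of the linear hypothesis h(x) = p0 + p1*x1 + p2*x2 + p3*x3, run before run_gradient_descent mutates parameter_vector:

print(_hypothesis_value((5, 2, 3)))  # 2 + 4*5 + 1*2 + 5*3 = 39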
| 330
|
def perfect_cube(n: int) -> bool:
    val = n ** (1 / 3)
    return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
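Note that the float cube root can accumulate rounding error for large n; a more robust integer sketch (hypothetical helper, non-negative n only):

def perfect_cube_int(n: int) -> bool:
    root = round(n ** (1 / 3))
    return root**3 == n

print(perfect_cube_int(27))  # True
print(perfect_cube_int(4))   # False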
| 330
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a ( _a , _a , unittest.TestCase ):
snake_case__ = CycleDiffusionPipeline
snake_case__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'''negative_prompt''',
'''height''',
'''width''',
'''negative_prompt_embeds''',
}
snake_case__ = PipelineTesterMixin.required_optional_params - {'''latents'''}
snake_case__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''} )
snake_case__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
snake_case__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),
            up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'),
            cross_attention_dim=32,
        )
lowerCAmelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , num_train_timesteps=10_00 , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , )
torch.manual_seed(0 )
lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
lowerCAmelCase = CLIPTextModel(snake_case_ )
lowerCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCamelCase__ ( self , _snake_case , _snake_case=0 ):
"""simple docstring"""
lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
lowerCAmelCase = image / 2 + 0.5
if str(snake_case_ ).startswith('mps' ):
lowerCAmelCase = torch.manual_seed(snake_case_ )
else:
lowerCAmelCase = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
lowerCAmelCase = {
"""prompt""": """An astronaut riding an elephant""",
"""source_prompt""": """An astronaut riding a horse""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""eta""": 0.1,
"""strength""": 0.8,
"""guidance_scale""": 3,
"""source_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = CycleDiffusionPipeline(**snake_case_ )
lowerCAmelCase = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
lowerCAmelCase = self.get_dummy_inputs(snake_case_ )
lowerCAmelCase = pipe(**snake_case_ )
lowerCAmelCase = output.images
lowerCAmelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
lowerCAmelCase = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.get_dummy_components()
for name, module in components.items():
if hasattr(snake_case_ , 'half' ):
lowerCAmelCase = module.half()
lowerCAmelCase = CycleDiffusionPipeline(**snake_case_ )
lowerCAmelCase = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
lowerCAmelCase = self.get_dummy_inputs(snake_case_ )
lowerCAmelCase = pipe(**snake_case_ )
lowerCAmelCase = output.images
lowerCAmelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
lowerCAmelCase = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().test_save_load_local()
@unittest.skip('non-deterministic pipeline' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().test_inference_batch_single_identical()
@skip_mps
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().test_save_load_optional_components()
@skip_mps
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png' )
lowerCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy' )
lowerCAmelCase = init_image.resize((5_12, 5_12) )
lowerCAmelCase = """CompVis/stable-diffusion-v1-4"""
lowerCAmelCase = DDIMScheduler.from_pretrained(snake_case_ , subfolder='scheduler' )
lowerCAmelCase = CycleDiffusionPipeline.from_pretrained(
            snake_case_ , scheduler=snake_case_ , safety_checker=snake_case_ , torch_dtype=torch.float16 , revision='fp16' )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
pipe.enable_attention_slicing()
lowerCAmelCase = """A black colored car"""
lowerCAmelCase = """A blue colored car"""
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = pipe(
prompt=snake_case_ , source_prompt=snake_case_ , image=snake_case_ , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=snake_case_ , output_type='np' , )
lowerCAmelCase = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png' )
lowerCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy' )
lowerCAmelCase = init_image.resize((5_12, 5_12) )
lowerCAmelCase = """CompVis/stable-diffusion-v1-4"""
lowerCAmelCase = DDIMScheduler.from_pretrained(snake_case_ , subfolder='scheduler' )
lowerCAmelCase = CycleDiffusionPipeline.from_pretrained(snake_case_ , scheduler=snake_case_ , safety_checker=snake_case_ )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
pipe.enable_attention_slicing()
lowerCAmelCase = """A black colored car"""
lowerCAmelCase = """A blue colored car"""
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = pipe(
prompt=snake_case_ , source_prompt=snake_case_ , image=snake_case_ , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=snake_case_ , output_type='np' , )
lowerCAmelCase = output.images
assert np.abs(image - expected_image ).max() < 2E-2
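A condensed sketch of the full-precision CycleDiffusion call exercised in the slow test above (checkpoint and hyperparameters copied from that test; expect GPU memory requirements similar to Stable Diffusion):

import torch
from diffusers import CycleDiffusionPipeline, DDIMScheduler
from diffusers.utils import load_image

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/cycle-diffusion/black_colored_car.png"
).resize((512, 512))
scheduler = DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
pipe = CycleDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", scheduler=scheduler)
image = pipe(
    prompt="A blue colored car",
    source_prompt="A black colored car",
    image=init_image,
    num_inference_steps=100,
    eta=0.1,
    strength=0.85,
    guidance_scale=3,
    source_guidance_scale=1,
    generator=torch.manual_seed(0),
).images[0]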
| 371
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Re-create the backend normalizer if its stored state disagrees with
        # the requested options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
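A minimal usage sketch of the fast tokenizer (checkpoint name taken from the pretrained maps above):

from transformers import DistilBertTokenizerFast

tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
encoding = tokenizer("Hello world!", return_tensors="pt")
print(tokenizer.convert_ids_to_tokens(encoding.input_ids[0].tolist()))
# ['[CLS]', 'hello', 'world', '!', '[SEP]']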
| 309
| 0
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Tuple:
__UpperCAmelCase : Tuple = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
__UpperCAmelCase : Dict = (
("layer.", "layer_"),
("word_embeddings.weight", "word_embeddings"),
("position_embeddings.weight", "position_embeddings"),
("token_type_embeddings.weight", "token_type_embeddings"),
(".", "/"),
("LayerNorm/weight", "LayerNorm/gamma"),
("LayerNorm/bias", "LayerNorm/beta"),
("weight", "kernel"),
)
if not os.path.isdir(snake_case__ ):
os.makedirs(snake_case__ )
__UpperCAmelCase : str = model.state_dict()
def to_tf_var_name(snake_case__ ):
for patt, repl in iter(snake_case__ ):
__UpperCAmelCase : Optional[int] = name.replace(snake_case__, snake_case__ )
return f'''bert/{name}'''
def create_tf_var(snake_case__, snake_case__, snake_case__ ):
__UpperCAmelCase : Union[str, Any] = tf.dtypes.as_dtype(tensor.dtype )
__UpperCAmelCase : Any = tf.get_variable(dtype=snake_case__, shape=tensor.shape, name=snake_case__, initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(snake_case__ )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
__UpperCAmelCase : Any = to_tf_var_name(snake_case__ )
__UpperCAmelCase : str = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
__UpperCAmelCase : List[str] = torch_tensor.T
__UpperCAmelCase : Tuple = create_tf_var(tensor=snake_case__, name=snake_case__, session=snake_case__ )
tf.keras.backend.set_value(snake_case__, snake_case__ )
__UpperCAmelCase : int = session.run(snake_case__ )
print(f'''Successfully created {tf_name}: {np.allclose(snake_case__, snake_case__ )}''' )
__UpperCAmelCase : int = tf.train.Saver(tf.trainable_variables() )
saver.save(snake_case__, os.path.join(snake_case__, model_name.replace("-", "_" ) + ".ckpt" ) )
def _UpperCamelCase ( snake_case__=None ) -> Optional[int]:
__UpperCAmelCase : List[str] = argparse.ArgumentParser()
parser.add_argument("--model_name", type=snake_case__, required=snake_case__, help="model name e.g. bert-base-uncased" )
parser.add_argument(
"--cache_dir", type=snake_case__, default=snake_case__, required=snake_case__, help="Directory containing pytorch model" )
parser.add_argument("--pytorch_model_path", type=snake_case__, required=snake_case__, help="/path/to/<pytorch-model-name>.bin" )
parser.add_argument("--tf_cache_dir", type=snake_case__, required=snake_case__, help="Directory in which to save tensorflow model" )
__UpperCAmelCase : str = parser.parse_args(snake_case__ )
__UpperCAmelCase : Tuple = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path ), cache_dir=args.cache_dir, )
convert_pytorch_checkpoint_to_tf(model=snake_case__, ckpt_dir=args.tf_cache_dir, model_name=args.model_name )
if __name__ == "__main__":
main()
| 157
|
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
_snake_case = logging.get_logger(__name__)
def _UpperCamelCase ( snake_case__, snake_case__ ) -> List[str]:
def run_func(snake_case__ ):
@wraps(snake_case__ )
def run_in_eager_mode(*snake_case__, **snake_case__ ):
return func(*snake_case__, **snake_case__ )
@wraps(snake_case__ )
@tf.function(experimental_compile=snake_case__ )
def run_in_graph_mode(*snake_case__, **snake_case__ ):
return func(*snake_case__, **snake_case__ )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> ["tf.Tensor"]:
__UpperCAmelCase : str = random.Random()
__UpperCAmelCase : str = [rng.randint(0, vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(snake_case__, shape=(batch_size, sequence_length), dtype=tf.intaa )
class TensorFlowBenchmark(Benchmark):

    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)

    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference

    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train

    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run the model a few extra times up front to stabilize XLA/TPU compilation
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat,
                # min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")

    def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
def pancake_sort(arr):
    """Sort a list using the pancake sort algorithm."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi (flip the maximum to the front)
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the unsorted prefix (flip the maximum into its final position)
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
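# Quick check (illustrative): pancake_sort([3, 1, 5, 2]) returns [1, 2, 3, 5].
# Each pass flips the current maximum to the front, then flips it into its final
# position, so the sorted suffix grows by one element per iteration.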
def solution(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below n (Project Euler problem 1)."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f"{solution() = }")
import time
from contextlib import contextmanager
from pathlib import Path

import pytest
import requests

from huggingface_hub.hf_api import HfApi, HfFolder


CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()

# NOTE: the data-file fixtures used below (text_file, zip_csv_with_dir_path,
# zip_image_path) are assumed to be provided by the companion fixture modules.


@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api: HfApi):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo


@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_img_data_
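# Usage sketch (illustrative; assumes this module is collected via conftest.py).
# pytest injects a fixture by matching the parameter name, so a test that needs a
# private repo on the CI hub simply declares it:
#
#     def test_something_with_private_repo(hf_private_dataset_repo_txt_data):
#         repo_id = hf_private_dataset_repo_txt_data
#         # e.g. "__DUMMY_TRANSFORMERS_USER__/repo_txt_data-..."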
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class a__ ( SCREAMING_SNAKE_CASE__ ):
_lowerCamelCase = 42
class a__ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ):
@register_to_config
def __init__( self : Optional[int], lowerCAmelCase : int = 32, lowerCAmelCase : int = 64, lowerCAmelCase : int = 20, lowerCAmelCase : int = 768, lowerCAmelCase : Optional[Any]=77, lowerCAmelCase : Tuple=4, lowerCAmelCase : float = 0.0, lowerCAmelCase : str = "silu", lowerCAmelCase : Optional[str] = None, lowerCAmelCase : Optional[str] = None, lowerCAmelCase : Optional[str] = "linear", lowerCAmelCase : Optional[str] = "prd", lowerCAmelCase : Optional[int] = None, lowerCAmelCase : Optional[int] = None, lowerCAmelCase : Optional[int] = None, ) -> List[Any]:
super().__init__()
lowercase : List[Any] = num_attention_heads
lowercase : int = attention_head_dim
lowercase : List[Any] = num_attention_heads * attention_head_dim
lowercase : Tuple = additional_embeddings
lowercase : Dict = time_embed_dim or inner_dim
lowercase : Optional[Any] = embedding_proj_dim or embedding_dim
lowercase : int = clip_embed_dim or embedding_dim
lowercase : List[str] = Timesteps(lowerCAmelCase, lowerCAmelCase, 0 )
lowercase : List[str] = TimestepEmbedding(lowerCAmelCase, lowerCAmelCase, out_dim=lowerCAmelCase, act_fn=lowerCAmelCase )
lowercase : List[str] = nn.Linear(lowerCAmelCase, lowerCAmelCase )
if embedding_proj_norm_type is None:
lowercase : str = None
elif embedding_proj_norm_type == "layer":
lowercase : Tuple = nn.LayerNorm(lowerCAmelCase )
else:
raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
lowercase : List[str] = nn.Linear(lowerCAmelCase, lowerCAmelCase )
if encoder_hid_proj_type is None:
lowercase : Optional[int] = None
elif encoder_hid_proj_type == "linear":
lowercase : Dict = nn.Linear(lowerCAmelCase, lowerCAmelCase )
else:
raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
lowercase : Dict = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, lowerCAmelCase ) )
if added_emb_type == "prd":
lowercase : Union[str, Any] = nn.Parameter(torch.zeros(1, 1, lowerCAmelCase ) )
elif added_emb_type is None:
lowercase : str = None
else:
raise ValueError(
f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
lowercase : Dict = nn.ModuleList(
[
BasicTransformerBlock(
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, dropout=lowerCAmelCase, activation_fn='gelu', attention_bias=lowerCAmelCase, )
for d in range(lowerCAmelCase )
] )
if norm_in_type == "layer":
lowercase : str = nn.LayerNorm(lowerCAmelCase )
elif norm_in_type is None:
lowercase : Optional[int] = None
else:
raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' )
lowercase : int = nn.LayerNorm(lowerCAmelCase )
lowercase : str = nn.Linear(lowerCAmelCase, lowerCAmelCase )
lowercase : Optional[Any] = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -1_0000.0 )
causal_attention_mask.triu_(1 )
lowercase : List[str] = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask', lowerCAmelCase, persistent=lowerCAmelCase )
lowercase : Any = nn.Parameter(torch.zeros(1, lowerCAmelCase ) )
lowercase : Any = nn.Parameter(torch.zeros(1, lowerCAmelCase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowercase ( self : Tuple ) -> Dict[str, AttentionProcessor]:
lowercase : Any = {}
def fn_recursive_add_processors(lowerCAmelCase : str, lowerCAmelCase : torch.nn.Module, lowerCAmelCase : Dict[str, AttentionProcessor] ):
if hasattr(lowerCAmelCase, 'set_processor' ):
lowercase : List[str] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''', lowerCAmelCase, lowerCAmelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
return processors
def lowercase ( self : Union[str, Any], lowerCAmelCase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> Tuple:
lowercase : str = len(self.attn_processors.keys() )
if isinstance(lowerCAmelCase, lowerCAmelCase ) and len(lowerCAmelCase ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(lowerCAmelCase )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(lowerCAmelCase : str, lowerCAmelCase : torch.nn.Module, lowerCAmelCase : Union[str, Any] ):
if hasattr(lowerCAmelCase, 'set_processor' ):
if not isinstance(lowerCAmelCase, lowerCAmelCase ):
module.set_processor(lowerCAmelCase )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''', lowerCAmelCase, lowerCAmelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
def lowercase ( self : Optional[Any] ) -> Optional[Any]:
self.set_attn_processor(AttnProcessor() )
def lowercase ( self : Any, lowerCAmelCase : int, lowerCAmelCase : Union[torch.Tensor, float, int], lowerCAmelCase : torch.FloatTensor, lowerCAmelCase : Optional[torch.FloatTensor] = None, lowerCAmelCase : Optional[torch.BoolTensor] = None, lowerCAmelCase : bool = True, ) -> List[Any]:
lowercase : Optional[Any] = hidden_states.shape[0]
lowercase : Union[str, Any] = timestep
if not torch.is_tensor(lowerCAmelCase ):
lowercase : List[str] = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device )
elif torch.is_tensor(lowerCAmelCase ) and len(timesteps.shape ) == 0:
lowercase : List[str] = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowercase : Optional[int] = timesteps * torch.ones(lowerCAmelCase, dtype=timesteps.dtype, device=timesteps.device )
lowercase : Dict = self.time_proj(lowerCAmelCase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
lowercase : Optional[int] = timesteps_projected.to(dtype=self.dtype )
lowercase : Any = self.time_embedding(lowerCAmelCase )
if self.embedding_proj_norm is not None:
lowercase : Any = self.embedding_proj_norm(lowerCAmelCase )
lowercase : List[str] = self.embedding_proj(lowerCAmelCase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
lowercase : str = self.encoder_hidden_states_proj(lowerCAmelCase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
lowercase : Optional[Any] = self.proj_in(lowerCAmelCase )
lowercase : Optional[int] = self.positional_embedding.to(hidden_states.dtype )
lowercase : Dict = []
lowercase : Optional[int] = 0
if encoder_hidden_states is not None:
additional_embeds.append(lowerCAmelCase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
lowercase : str = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
lowercase : Union[str, Any] = hidden_states[:, None, :]
lowercase : int = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
lowercase : List[str] = self.prd_embedding.to(hidden_states.dtype ).expand(lowerCAmelCase, -1, -1 )
additional_embeds.append(lowerCAmelCase )
lowercase : Union[str, Any] = torch.cat(
lowerCAmelCase, dim=1, )
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
lowercase : Optional[int] = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
lowercase : List[Any] = F.pad(
lowerCAmelCase, (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
), value=0.0, )
lowercase : str = hidden_states + positional_embeddings
if attention_mask is not None:
lowercase : Tuple = (1 - attention_mask.to(hidden_states.dtype )) * -1_0000.0
lowercase : List[Any] = F.pad(lowerCAmelCase, (0, self.additional_embeddings), value=0.0 )
lowercase : int = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
lowercase : Union[str, Any] = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0 )
if self.norm_in is not None:
lowercase : List[Any] = self.norm_in(lowerCAmelCase )
for block in self.transformer_blocks:
lowercase : Tuple = block(lowerCAmelCase, attention_mask=lowerCAmelCase )
lowercase : Optional[Any] = self.norm_out(lowerCAmelCase )
if self.prd_embedding is not None:
lowercase : Optional[Any] = hidden_states[:, -1]
else:
lowercase : Any = hidden_states[:, additional_embeddings_len:]
lowercase : Optional[int] = self.proj_to_clip_embeddings(lowerCAmelCase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=lowerCAmelCase )
def lowercase ( self : Any, lowerCAmelCase : Dict ) -> Dict:
lowercase : int = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
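# Usage sketch (illustrative, tiny config; shapes follow from the forward pass above:
# the prior consumes latents, a timestep, and a projected text embedding, and returns
# an image-embedding prediction of size `clip_embed_dim`):
#
#     model = PriorTransformer(
#         num_attention_heads=2, attention_head_dim=4, num_layers=2,
#         embedding_dim=8, num_embeddings=7, additional_embeddings=4,
#     )
#     latents = torch.randn(1, 8)
#     proj = torch.randn(1, 8)
#     enc = torch.randn(1, 7, 8)
#     out = model(latents, timestep=0, proj_embedding=proj, encoder_hidden_states=enc)
#     out.predicted_image_embedding.shape  # torch.Size([1, 8])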
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
SCREAMING_SNAKE_CASE__ : Optional[int] = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
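# Usage sketch (mirrors the documented DPR reader example; the checkpoint is the
# public facebook/dpr-reader-single-nq-base):
#
#     from transformers import DPRReader, DPRReaderTokenizerFast
#
#     tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#     model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encoded_inputs = tokenizer(
#         questions="What is love?",
#         titles="Haddaway",
#         texts="'What Is Love' is a song recorded by the artist Haddaway",
#         return_tensors="pt",
#     )
#     outputs = model(**encoded_inputs)
#     predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)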
from datetime import datetime as dt
import os

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
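# Usage note (illustrative invocation; the script expects a GitHub token in the
# environment and is normally run from a scheduled CI job; the exact script path
# may differ by repository layout):
#
#     GITHUB_TOKEN=<token> python scripts/stale.py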
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
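# Usage note (illustrative): thanks to `_LazyModule`, importing the package stays
# cheap; the torch-dependent submodule is only loaded at attribute access time:
#
#     import transformers
#     model_cls = transformers.LayoutLMv2Model  # modeling code imported here, not before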
from __future__ import annotations

from sys import maxsize
from typing import Generic, TypeVar

T = TypeVar("T")


def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2


class MinPriorityQueue(Generic[T]):
    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight


def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
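# Usage sketch (small illustrative graph):
#
#     graph = GraphUndirectedWeighted[int]()
#     graph.add_edge(1, 2, 1)
#     graph.add_edge(2, 3, 2)
#     graph.add_edge(1, 3, 4)
#     dist, parent = prims_algo(graph)
#     # parent encodes the minimum spanning tree: {1: None, 2: 1, 3: 2}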
import argparse
import struct
import unittest
class SHA256:
    """Class to contain the entire pipeline for SHA-256 hashing."""

    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initialize hash values
        self.hashes = [
0X6a09e667,
0Xbb67ae85,
0X3c6ef372,
0Xa54ff53a,
0X510e527f,
0X9b05688c,
0X1f83d9ab,
0X5be0cd19,
]
        # Initialize round constants
        self.round_constants = [
0X428a2f98,
0X71374491,
0Xb5c0fbcf,
0Xe9b5dba5,
0X3956c25b,
0X59f111f1,
0X923f82a4,
0Xab1c5ed5,
0Xd807aa98,
0X12835b01,
0X243185be,
0X550c7dc3,
0X72be5d74,
0X80deb1fe,
0X9bdc06a7,
0Xc19bf174,
0Xe49b69c1,
0Xefbe4786,
0X0fc19dc6,
0X240ca1cc,
0X2de92c6f,
0X4a7484aa,
0X5cb0a9dc,
0X76f988da,
0X983e5152,
0Xa831c66d,
0Xb00327c8,
0Xbf597fc7,
0Xc6e00bf3,
0Xd5a79147,
0X06ca6351,
0X14292967,
0X27b70a85,
0X2e1b2138,
0X4d2c6dfc,
0X53380d13,
0X650a7354,
0X766a0abb,
0X81c2c92e,
0X92722c85,
0Xa2bfe8a1,
0Xa81a664b,
0Xc24b8b70,
0Xc76c51a3,
0Xd192e819,
0Xd6990624,
0Xf40e3585,
0X106aa070,
0X19a4c116,
0X1e376c08,
0X2748774c,
0X34b0bcb5,
0X391c0cb3,
0X4ed8aa4a,
0X5b9cca4f,
0X682e6ff3,
0X748f82ee,
0X78a5636f,
0X84c87814,
0X8cc70208,
0X90befffa,
0Xa4506ceb,
0Xbef9a3f7,
0Xc67178f2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        """Pad the data so its length is a multiple of 64 bytes, appending the
        64-bit big-endian message length as required by SHA-256."""
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        """Run the compression function over every 64-byte block."""
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array (message schedule)
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000

                a, b, c, d, e, f, g, h = (
                    ((temp1 + temp2) % 0x100000000),
                    a,
                    b,
                    c,
                    ((d + temp1) % 0x100000000),
                    e,
                    f,
                    g,
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit integer by the given number of bits."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)


class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file"
    )
    args = parser.parse_args()

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(args.input_string, "utf-8")

    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
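# Quick check (illustrative; the unit test above asserts the same equality):
#
#     import hashlib
#     assert SHA256(b"abc").hash == hashlib.sha256(b"abc").hexdigest()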
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate


@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
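# Usage sketch (illustrative column names; the template is exposed as
# `datasets.tasks.AudioClassification`):
#
#     from datasets import Audio, ClassLabel, Features
#
#     features = Features({"audio": Audio(), "labels": ClassLabel(names=["cat", "dog"])})
#     template = AudioClassification(audio_column="audio", label_column="labels")
#     template = template.align_with_features(features)
#     template.column_mapping  # {"audio": "audio", "labels": "labels"}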
import random
import unittest

import torch

from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
| 35
|
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of the squares of
    the first n natural numbers (Project Euler problem 6)."""
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'{solution() = }')
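    # Quick sanity check (added for illustration): for n = 10, the square of
    # the sum is 55**2 = 3025 and the sum of the squares is 385, so the
    # difference should be 2640.
    assert solution(10) == 2640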
| 309
| 0
|
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class FFT:
    """Fast polynomial multiplication using the radix-2 fast Fourier transform."""

    def __init__(self, poly_a=None, poly_b=None):
        # Input as coefficient lists
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    # Discrete fourier transform of A or B, selected by `which`
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    # Multiply the DFTs of A and B and run the inverse transform to get A*B
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverse_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverse_c[0]) <= 1:
            return inverse_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverse_c[i][j]
                            + inverse_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverse_c[i][j]
                            - inverse_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverse_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverse_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverse_c]

        # Remove leading 0's
        while inverse_c[-1] == 0:
            inverse_c.pop()
        return inverse_c

    # Overwrite __str__ for print(); shows A, B and A*B
    def __str__(self):
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A])
        )
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B])
        )
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product)
        )
        return f"{a}\n{b}\n{c}"


# Unit tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
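    # Minimal usage sketch (added for illustration): multiplying
    # A(x) = 1 + 2x + 3x**2 by B(x) = 4 + 5x should give the coefficients
    # 4, 13, 22, 15; note the class stores them as rounded complex numbers.
    print(FFT([1, 2, 3], [4, 5]))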
| 320
|
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LEDTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 320
| 1
|
'''simple docstring'''
import argparse
import hashlib  # hashlib is only used for the self-test below
import struct
class SHA1Hash:
    """Class to contain the entire pipeline for the SHA-1 hashing algorithm."""

    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        """Left-rotate the 32-bit integer n by b bits."""
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        """Pad the data with a 1 bit, zero bytes and the 64-bit message length."""
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        """Split the padded data into 64-byte blocks."""
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        """Expand a 64-byte block into 80 32-bit words."""
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
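# Known-answer sanity note (added for illustration): SHA1Hash(b"abc").final_hash()
# should equal hashlib.sha1(b"abc").hexdigest(), i.e.
# "a9993e364706816aba3e25717850c26c9cd0d89d".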
| 67
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
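# Usage note (added for illustration): with the lazy structure above, e.g.
# `from transformers.models.mask2former import Mask2FormerConfig` resolves the
# submodule on first attribute access, so torch/vision are only imported when
# the corresponding backend objects are actually requested.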
| 93
| 0
|
'''simple docstring'''
def actual_power(a: int, b: int) -> int:
    """Recursive divide-and-conquer exponentiation: computes a ** |b|.
    Note that int(b / 2) truncates toward zero, so the recursion also
    terminates for negative b."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    """Handles negative exponents by inverting the positive-power result."""
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
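    # Expected output (added as a sanity note): power(-2, -3) == 1 / (-2) ** 3 == -0.125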
| 142
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"


def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split documents into passages."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}


def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )

    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index


@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )


if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
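# Illustrative invocation (added; the script name and csv path are placeholders):
#   python use_own_knowledge_dataset.py \
#       --csv_path path/to/my_knowledge_dataset.csv \
#       --output_dir path/to/my_knowledge_dataset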
| 142
| 1
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
UpperCAmelCase__ = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
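# Illustrative usage sketch (added; the passage text is made up, the call
# mirrors the docstring above):
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(
#       questions=["What is love ?"],
#       titles=["Haddaway"],
#       texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#       return_tensors="pt",
#   )
#   # encoded["input_ids"] has shape (n_passages, sequence_length)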
| 339
|
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend(" if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend(" if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n'
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 339
| 1
|
'''simple docstring'''
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Speed of sound in a fluid: c = sqrt(K / rho), with the bulk modulus K
    in Pa and the density rho in kg/m^3."""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
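    # Worked example (added for illustration, using commonly quoted values for
    # water): bulk modulus K ≈ 2.15e9 Pa and density ρ ≈ 998 kg/m³ give
    # sqrt(2.15e9 / 998) ≈ 1467.8 m/s.
    print(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9))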
| 17
|
'''simple docstring'''
import base64


def base85_encode(string: str) -> bytes:
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    return base64.b85decode(a85encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)

    decoded = base85_decode(encoded)
    print(decoded)
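    # Round-trip sanity check (added for illustration): decoding the encoding
    # must return the original string.
    assert base85_decode(base85_encode(test)) == test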
| 17
| 1
|
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            new_state_dict[f"luke.{key}"] = state_dict[key]
        else:
            new_state_dict[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
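# Illustrative invocation (added; all paths are placeholders):
#   python convert_mluke_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path path/to/pytorch_model.bin \
#       --metadata_path path/to/metadata.json \
#       --entity_vocab_path path/to/entity_vocab.jsonl \
#       --pytorch_dump_folder_path path/to/output \
#       --model_size base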
| 70
|
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class _snake_case ( _a , _a , unittest.TestCase ):
_A : Tuple = AutoencoderKL
_A : Union[str, Any] = '''sample'''
_A : int = 1E-2
@property
def __UpperCamelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE:Tuple = 4
SCREAMING_SNAKE_CASE:Dict = 3
SCREAMING_SNAKE_CASE:str = (32, 32)
SCREAMING_SNAKE_CASE:List[str] = floats_tensor((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE__ )
return {"sample": image}
@property
def __UpperCamelCase ( self : Any ):
return (3, 32, 32)
@property
def __UpperCamelCase ( self : int ):
return (3, 32, 32)
def __UpperCamelCase ( self : Dict ):
SCREAMING_SNAKE_CASE:Optional[Any] = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
SCREAMING_SNAKE_CASE:List[Any] = self.dummy_input
return init_dict, inputs_dict
def __UpperCamelCase ( self : int ):
pass
def __UpperCamelCase ( self : Tuple ):
pass
@unittest.skipIf(torch_device == "mps" ,"Gradient checkpointing skipped on MPS" )
def __UpperCamelCase ( self : str ):
# enable deterministic behavior for gradient checkpointing
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Optional[Any] = self.prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE:Optional[Any] = self.model_class(**SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
assert not model.is_gradient_checkpointing and model.training
SCREAMING_SNAKE_CASE:str = model(**SCREAMING_SNAKE_CASE__ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
SCREAMING_SNAKE_CASE:str = torch.randn_like(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:List[str] = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
SCREAMING_SNAKE_CASE:List[Any] = self.model_class(**SCREAMING_SNAKE_CASE__ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(SCREAMING_SNAKE_CASE__ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
SCREAMING_SNAKE_CASE:Optional[int] = model_a(**SCREAMING_SNAKE_CASE__ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
SCREAMING_SNAKE_CASE:List[Any] = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
SCREAMING_SNAKE_CASE:Dict = dict(model.named_parameters() )
SCREAMING_SNAKE_CASE:Tuple = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data ,named_params_a[name].grad.data ,atol=5e-5 ) )
def __UpperCamelCase ( self : int ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:int = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" ,output_loading_info=SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
self.assertEqual(len(loading_info["missing_keys"] ) ,0 )
model.to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Any = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def __UpperCamelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE:Union[str, Any] = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
SCREAMING_SNAKE_CASE:int = model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
if torch_device == "mps":
SCREAMING_SNAKE_CASE:str = torch.manual_seed(0 )
else:
SCREAMING_SNAKE_CASE:Tuple = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 )
SCREAMING_SNAKE_CASE:Any = torch.randn(
1 ,model.config.in_channels ,model.config.sample_size ,model.config.sample_size ,generator=torch.manual_seed(0 ) ,)
SCREAMING_SNAKE_CASE:Optional[int] = image.to(SCREAMING_SNAKE_CASE__ )
with torch.no_grad():
SCREAMING_SNAKE_CASE:Union[str, Any] = model(SCREAMING_SNAKE_CASE__ ,sample_posterior=SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__ ).sample
SCREAMING_SNAKE_CASE:str = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
SCREAMING_SNAKE_CASE:List[Any] = torch.tensor(
[
-4.0_078e-01,
-3.8_323e-04,
-1.2_681e-01,
-1.1_462e-01,
2.0_095e-01,
1.0_893e-01,
-8.8_247e-02,
-3.0_361e-01,
-9.8_644e-03,
] )
elif torch_device == "cpu":
SCREAMING_SNAKE_CASE:Optional[int] = torch.tensor(
[-0.1_352, 0.0_878, 0.0_419, -0.0_818, -0.1_069, 0.0_688, -0.1_458, -0.4_446, -0.0_026] )
else:
SCREAMING_SNAKE_CASE:Dict = torch.tensor(
[-0.2_421, 0.4_642, 0.2_507, -0.0_438, 0.0_682, 0.3_160, -0.2_018, -0.0_727, 0.2_485] )
self.assertTrue(torch_all_close(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,rtol=1e-2 ) )
@slow
class _snake_case ( unittest.TestCase ):
    def get_file_format(self ,seed ,shape ):
        return f'''gaussian_noise_s={seed}_shape={"_".join([str(s ) for s in shape] )}.npy'''

    def tearDown(self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self ,seed=0 ,shape=(4, 3, 512, 512) ,fp16=False ):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed ,shape ) ) ).to(torch_device ).to(dtype )
        return image

    def get_sd_vae_model(self ,model_id="CompVis/stable-diffusion-v1-4" ,fp16=False ):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32
        model = AutoencoderKL.from_pretrained(
            model_id ,subfolder="vae" ,torch_dtype=torch_dtype ,revision=revision ,)
        model.to(torch_device ).eval()
        return model

    def get_generator(self ,seed=0 ):
        if torch_device == "mps":
            return torch.manual_seed(seed )
        return torch.Generator(device=torch_device ).manual_seed(seed )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_603, 0.9_878, -0.0_495, -0.0_790, -0.2_709, 0.8_375, -0.2_060, -0.0_824], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]],
[47, [-0.2_376, 0.1_168, 0.1_332, -0.4_840, -0.2_508, -0.0_791, -0.0_493, -0.4_089], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]],
# fmt: on
] )
    def test_stable_diffusion(self ,seed ,expected_slice ,expected_slice_mps ):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed )
        generator = self.get_generator(seed )
        with torch.no_grad():
            sample = model(image ,generator=generator ,sample_posterior=True ).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
        assert torch_all_close(output_slice ,expected_output_slice ,atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_513, 0.0_289, 1.3_799, 0.2_166, -0.2_573, -0.0_871, 0.5_103, -0.0_999]],
[47, [-0.4_128, -0.1_320, -0.3_704, 0.1_965, -0.4_116, -0.2_332, -0.3_340, 0.2_247]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_fp16(self ,seed ,expected_slice ):
        model = self.get_sd_vae_model(fp16=True )
        image = self.get_sd_image(seed ,fp16=True )
        generator = self.get_generator(seed )
        with torch.no_grad():
            sample = model(image ,generator=generator ,sample_posterior=True ).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice ,expected_output_slice ,atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_609, 0.9_866, -0.0_487, -0.0_777, -0.2_716, 0.8_368, -0.2_055, -0.0_814], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]],
[47, [-0.2_377, 0.1_147, 0.1_333, -0.4_841, -0.2_506, -0.0_805, -0.0_491, -0.4_085], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]],
# fmt: on
] )
    def test_stable_diffusion_mode(self ,seed ,expected_slice ,expected_slice_mps ):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed )
        with torch.no_grad():
            sample = model(image ).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
        assert torch_all_close(output_slice ,expected_output_slice ,atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_051, -0.1_803, -0.2_311, -0.2_114, -0.3_292, -0.3_574, -0.2_953, -0.3_323]],
[37, [-0.2_632, -0.2_625, -0.2_199, -0.2_741, -0.4_539, -0.4_990, -0.3_720, -0.4_925]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode(self ,seed ,expected_slice ):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed ,shape=(3, 4, 64, 64) )
        with torch.no_grad():
            sample = model.decode(encoding ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice ,expected_output_slice ,atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_369, 0.0_207, -0.0_776, -0.0_682, -0.1_747, -0.1_930, -0.1_465, -0.2_039]],
[16, [-0.1_628, -0.2_134, -0.2_747, -0.2_642, -0.3_774, -0.4_404, -0.3_687, -0.4_277]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16(self ,seed ,expected_slice ):
        model = self.get_sd_vae_model(fp16=True )
        encoding = self.get_sd_image(seed ,shape=(3, 4, 64, 64) ,fp16=True )
        with torch.no_grad():
            sample = model.decode(encoding ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice ,expected_output_slice ,atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason="xformers is not required when using PyTorch 2.0." )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self ,seed ):
        model = self.get_sd_vae_model(fp16=True )
        encoding = self.get_sd_image(seed ,shape=(3, 4, 64, 64) ,fp16=True )
        with torch.no_grad():
            sample = model.decode(encoding ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        assert torch_all_close(sample ,sample_2 ,atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason="xformers is not required when using PyTorch 2.0." )
    def test_stable_diffusion_decode_xformers_vs_2_0(self ,seed ):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed ,shape=(3, 4, 64, 64) )
        with torch.no_grad():
            sample = model.decode(encoding ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        assert torch_all_close(sample ,sample_2 ,atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_001, 0.0_918, -2.6_984, -3.9_720, -3.2_099, -5.0_353, 1.7_338, -0.2_065, 3.4_267]],
[47, [-1.5_030, -4.3_871, -6.0_355, -9.1_157, -1.6_661, -2.7_853, 2.1_607, -5.0_823, 2.5_633]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample(self ,seed ,expected_slice ):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed )
        generator = self.get_generator(seed )
        with torch.no_grad():
            dist = model.encode(image ).latent_dist
            sample = dist.sample(generator=generator )
        assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice ,expected_output_slice ,atol=tolerance )
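

# A minimal standalone sketch of the encode/decode round trip the tests above
# exercise (an illustrative addition, not part of the test suite; it assumes
# network access to download the Stable Diffusion VAE weights):
if __name__ == "__main__":
    vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4" ,subfolder="vae" ).eval()
    dummy = torch.randn(1 ,3 ,512 ,512 )
    with torch.no_grad():
        latents = vae.encode(dummy ).latent_dist.sample()  # (1, 4, 64, 64): 8x spatial downsampling
        reconstruction = vae.decode(latents ).sample  # back to (1, 3, 512, 512)
    print(latents.shape ,reconstruction.shape )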
| 139
| 0
|
"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class lowerCAmelCase ( ModelTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    model_class = PriorTransformer
    main_input_name = """hidden_states"""

    @property
    def dummy_input(self ):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = floats_tensor((batch_size, embedding_dim) ).to(torch_device )
        proj_embedding = floats_tensor((batch_size, embedding_dim) ).to(torch_device )
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def get_dummy_seed_input(self , seed=0 ):
        torch.manual_seed(seed )
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        proj_embedding = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    @property
    def input_shape(self ):
        return (4, 8)

    @property
    def output_shape(self ):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self ):
        init_dict = {
            'num_attention_heads': 2,
            'attention_head_dim': 4,
            'num_layers': 2,
            'embedding_dim': 8,
            'num_embeddings': 7,
            'additional_embeddings': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub(self ):
        model, loading_info = PriorTransformer.from_pretrained(
            'hf-internal-testing/prior-dummy' , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
        model.to(torch_device )
        hidden_states = model(**self.dummy_input )[0]
        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature(self ):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict )
        signature = inspect.signature(model.forward )
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ['hidden_states', 'timestep']
        self.assertListEqual(arg_names[:2] , expected_arg_names )

    def test_output_pretrained(self ):
        model = PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy' )
        model = model.to(torch_device )
        if hasattr(model , 'set_default_attn_processor' ):
            model.set_default_attn_processor()
        input_dict = self.get_dummy_seed_input()
        with torch.no_grad():
            output = model(**input_dict )[0]
        output_slice = output[0, :5].flatten().cpu()
        print(output_slice )
        # The generator is seeded on the appropriate device, so the expected
        # output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.34_36, -0.28_70, 0.75_38, 0.43_68, -0.02_39] )
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1e-2 ) )
@slow
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def get_dummy_seed_input(self , batch_size=1 , embedding_dim=768 , num_embeddings=77 , seed=0 ):
        torch.manual_seed(seed )
        hidden_states = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        proj_embedding = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def tearDown(self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.58_61, 0.12_83, -0.09_31, 0.08_82, 0.44_76, 0.13_29, -0.04_98, 0.06_40]],
[37, [-0.49_13, 0.01_10, -0.04_83, 0.05_41, 0.49_54, -0.01_70, 0.03_54, 0.16_51]],
# fmt: on
] )
    def test_kandinsky_prior(self , seed , expected_slice ):
        model = PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior' , subfolder='prior' )
        model.to(torch_device )
        input_dict = self.get_dummy_seed_input(seed=seed )
        with torch.no_grad():
            sample = model(**input_dict )[0]
        assert list(sample.shape ) == [1, 768]
        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice )
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=1e-3 )
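

# A minimal standalone sketch of a PriorTransformer forward pass (an illustrative
# addition; it assumes network access to the hf-internal-testing dummy checkpoint,
# whose config uses embedding_dim=8 and num_embeddings=7 as in the tests above):
if __name__ == "__main__":
    prior = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy" ).eval()
    batch_size, embedding_dim, num_embeddings = 1, 8, 7
    with torch.no_grad():
        out = prior(
            hidden_states=torch.randn(batch_size, embedding_dim ),
            timestep=2,
            proj_embedding=torch.randn(batch_size, embedding_dim ),
            encoder_hidden_states=torch.randn(batch_size, num_embeddings, embedding_dim ),
        )[0]
    print(out.shape )  # (batch_size, embedding_dim)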
| 38
|
"""simple docstring"""
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='''relu''')
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation='''relu'''))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation='''relu'''))
    classifier.add(layers.Dense(units=1, activation='''sigmoid'''))

    # Compiling the CNN
    classifier.compile(
        optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
    )

    # Part 2 - Fitting the CNN to the images
    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        '''dataset/training_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
    )
    test_set = test_datagen.flow_from_directory(
        '''dataset/test_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
    )
    # `fit_generator` was removed in recent TensorFlow releases; `fit` accepts
    # generators directly.
    classifier.fit(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
    classifier.save('''cnn.h5''')

    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        '''dataset/single_prediction/image.png''', target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    # The sigmoid head outputs a probability in [0, 1], so threshold it rather
    # than comparing against exact 0 or 1.
    if result[0][0] >= 0.5:
        prediction = '''Abnormality detected'''
    else:
        prediction = '''Normal'''
| 38
| 1
|
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def A_ ( inductance : float, capacitance : float ):
"""simple docstring"""
if inductance <= 0:
raise ValueError('''Inductance cannot be 0 or negative''' )
elif capacitance <= 0:
raise ValueError('''Capacitance cannot be 0 or negative''' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
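
    # A minimal usage sketch (an illustrative addition): for L = 1 H and
    # C = 1 F, 1 / (2 * pi * sqrt(L * C)) is roughly 0.159 Hz.
    label, frequency = A_(1, 1)
    print(f"{label}: {frequency:.3f} Hz")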
| 320
|
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'''
''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'''
''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''',
'''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'''
''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'''
''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'''
''' body.''',
'''Amnesty International releases its annual report on the death penalty. The report catalogs the use of'''
''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'''
''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'''
''' punishment.''',
]
TGT = [
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'''
''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'''
''' had informed his Lufthansa training school of an episode of severe depression, airline says .''',
'''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'''
''' Israel and the United States opposed the move, which could open the door to war crimes investigations against'''
''' Israelis .''',
'''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'''
''' death . Organization claims that governments around the world are using the threat of terrorism to advance'''
''' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'''
''' sentences up by 28% .''',
]
def test_disaggregated_scores_are_determinstic():
    """simple docstring"""
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=['''rouge2''', '''rougeL'''] )
    assert isinstance(no_aggregation, defaultdict )
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=['''rouge2'''] )
    assert (
        pd.DataFrame(no_aggregation['''rouge2'''] ).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2['''rouge2'''] ).fmeasure.mean()
    )


def test_newline_cnn():
    """simple docstring"""
    k = '''rougeLsum'''
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k] )[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k] )[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics():
    """simple docstring"""
    k = ['''rouge1''', '''rouge2''', '''rougeL''']
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k )
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k )
    assert score_sep == score_no_sep


def test_single_sent_scores_dont_depend_on_newline_sep():
    """simple docstring"""
    pred = [
        '''Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.''',
        '''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''',
    ]
    tgt = [
        '''Margot Frank, died in 1945, a month earlier than previously thought.''',
        '''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'''
        ''' the final seconds on board Flight 9525.''',
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True ) == calculate_rouge(pred, tgt, newline_sep=False )


def test_pegasus_newline():
    """simple docstring"""
    pred = [
        '''" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '''
    ]
    tgt = [
        ''' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'''
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=['''rougeLsum'''], newline_sep=False )['''rougeLsum''']
    new_score = calculate_rouge(pred, tgt, rouge_keys=['''rougeLsum'''] )['''rougeLsum''']
    assert new_score > prev_score


def test_rouge_cli():
    """simple docstring"""
    data_dir = Path('''examples/seq2seq/test_data/wmt_en_ro''' )
    metrics = calculate_rouge_path(data_dir.joinpath('''test.source''' ), data_dir.joinpath('''test.target''' ) )
    assert isinstance(metrics, dict )
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath('''test.source''' ), data_dir.joinpath('''test.target''' ), bootstrap_aggregation=False )
    assert isinstance(metrics_default_dict, defaultdict )
| 320
| 1
|
class EditDistance:
    def __init__(self ):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self , m: int , n: int ) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
            else:
                insert = self.__min_dist_top_down_dp(m , n - 1 )
                delete = self.__min_dist_top_down_dp(m - 1 , n )
                replace = self.__min_dist_top_down_dp(m - 1 , n - 1 )
                self.dp[m][n] = 1 + min(insert , delete , replace )
            return self.dp[m][n]

    def min_dist_top_down(self , word1: str , word2: str ) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2 ) )] for _ in range(len(word1 ) )]
        return self.__min_dist_top_down_dp(len(word1 ) - 1 , len(word2 ) - 1 )

    def min_dist_bottom_up(self , word1: str , word2: str ) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1 )
        n = len(word2 )
        self.dp = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
        for i in range(m + 1 ):
            for j in range(n + 1 ):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert , delete , replace )
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()
    print('''****************** Testing Edit Distance DP Algorithm ******************''')
    print()
    S1 = input('''Enter the first string: ''').strip()
    S2 = input('''Enter the second string: ''').strip()
    print()
    print(f'The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}')
    print(f'The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}')
    print()
    print('''*************** End of Testing Edit Distance DP Algorithm ***************''')
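
    # A quick fixed-input sanity check (an illustrative addition): the classic
    # "intention" -> "execution" example has edit distance 5.
    assert EditDistance().min_dist_bottom_up("intention", "execution") == 5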
| 371
|
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / '''model_card_template.md'''
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '''/api/telemetry/'''


def http_user_agent(user_agent=None ):
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get('''DIFFUSERS_IS_CI''' , '''''' ).upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent , dict ):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items() )
    elif isinstance(user_agent , str ):
        ua += "; " + user_agent
    return ua
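

# Example of the string http_user_agent builds (an illustrative comment; the
# framework fragments depend on which libraries are installed locally):
#   http_user_agent({"pipeline_class": "StableDiffusionPipeline"})
#   -> "diffusers/<version>; python/<version>; session_id/<hex>; torch/<version>; pipeline_class/StableDiffusionPipeline"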
def get_full_repo_name(model_id , organization=None , token=None ):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token )['''name''']
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args , model_name ):
    if not is_jinja_available():
        raise ValueError(
            '''Modelcard rendering is based on Jinja templates.'''
            ''' Please make sure to have `jinja` installed before using `create_model_card`.'''
            ''' To install it, please run `pip install Jinja2`.''' )
    if hasattr(args , '''local_rank''' ) and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args , '''hub_token''' ) else None
    repo_name = get_full_repo_name(model_name , token=hub_token )
    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language='''en''' ,
            license='''apache-2.0''' ,
            library_name='''diffusers''' ,
            tags=[] ,
            datasets=args.dataset_name ,
            metrics=[] ,
        ) ,
        template_path=MODEL_CARD_TEMPLATE_PATH ,
        model_name=model_name ,
        repo_name=repo_name ,
        dataset_name=args.dataset_name if hasattr(args , '''dataset_name''' ) else None ,
        learning_rate=args.learning_rate ,
        train_batch_size=args.train_batch_size ,
        eval_batch_size=args.eval_batch_size ,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args , '''gradient_accumulation_steps''' ) else None
        ) ,
        adam_beta1=args.adam_beta1 if hasattr(args , '''adam_beta1''' ) else None ,
        adam_beta2=args.adam_beta2 if hasattr(args , '''adam_beta2''' ) else None ,
        adam_weight_decay=args.adam_weight_decay if hasattr(args , '''adam_weight_decay''' ) else None ,
        adam_epsilon=args.adam_epsilon if hasattr(args , '''adam_epsilon''' ) else None ,
        lr_scheduler=args.lr_scheduler if hasattr(args , '''lr_scheduler''' ) else None ,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args , '''lr_warmup_steps''' ) else None ,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args , '''ema_inv_gamma''' ) else None ,
        ema_power=args.ema_power if hasattr(args , '''ema_power''' ) else None ,
        ema_max_decay=args.ema_max_decay if hasattr(args , '''ema_max_decay''' ) else None ,
        mixed_precision=args.mixed_precision ,
    )
    card_path = os.path.join(args.output_dir , '''README.md''' )
    model_card.save(card_path )
def extract_commit_hash(resolved_file , commit_hash=None ):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file ).as_posix() )
    search = re.search(r'''snapshots/([^/]+)/''' , resolved_file )
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash ) else None
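

# Illustrative behavior (an added comment; the 40-character hex string stands in
# for a real commit hash inside a hub cache path):
#   extract_commit_hash("/cache/snapshots/" + "a" * 40 + "/model.bin") == "a" * 40
#   extract_commit_hash("/some/local/model.bin") is None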
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface'''))
)
old_diffusers_cache = os.path.join(hf_cache_home, '''diffusers''')
def move_cache(old_cache_dir=None , new_cache_dir=None ):
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir ).expanduser()
    new_cache_dir = Path(new_cache_dir ).expanduser()
    for old_blob_path in old_cache_dir.glob('''**/blobs/*''' ):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir )
            new_blob_path.parent.mkdir(parents=True , exist_ok=True )
            os.replace(old_blob_path , new_blob_path )
            try:
                os.symlink(new_blob_path , old_blob_path )
            except OSError:
                logger.warning(
                    '''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''' )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
'''existing cached models. This is a one-time operation, you can interrupt it or run it '''
'''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
)
try:
move_cache()
except Exception as e:
            trace = '''\n'''.join(traceback.format_tb(e.__traceback__))
logger.error(
f'There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '
'''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
'''message and we will do our best to help.'''
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, '''w''') as f:
f.write('''1''')
except Exception:
logger.warning(
f'There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '
'''the directory exists and can be written to.'''
)
def _add_variant(weights_name , variant=None ):
    if variant is not None:
        splits = weights_name.split('''.''' )
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = '''.'''.join(splits )
    return weights_name
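

# Illustrative behavior of the helper above (an added comment; the file name is
# the usual diffusers weights name):
#   _add_variant("diffusion_pytorch_model.bin", "fp16") -> "diffusion_pytorch_model.fp16.bin"
#   _add_variant("diffusion_pytorch_model.bin")         -> "diffusion_pytorch_model.bin"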
def _get_model_file(
    pretrained_model_name_or_path , *,
    weights_name , subfolder , cache_dir , force_download , proxies , resume_download , local_files_only , use_auth_token , user_agent , revision , commit_hash=None , ):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path )
    if os.path.isfile(pretrained_model_name_or_path ):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path ):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path , weights_name ) ):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path , weights_name )
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path , subfolder , weights_name ) ):
            model_file = os.path.join(pretrained_model_name_or_path , subfolder , weights_name )
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}." )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__ ).base_version ) >= version.parse('''0.20.0''' )
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path , filename=_add_variant(weights_name , revision ) , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead." , FutureWarning , )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name , revision )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name , revision )}' so that the correct variant file can be added." , FutureWarning , )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path , filename=weights_name , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
            return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''' )
except RevisionNotFoundError:
raise EnvironmentError(
f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
'''this model name. Check the model page at '''
f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." )
except EntryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." )
except HTTPError as err:
raise EnvironmentError(
f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}" )
except ValueError:
raise EnvironmentError(
f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
f" directory containing a file named {weights_name} or"
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' )
except EnvironmentError:
raise EnvironmentError(
f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
f"containing a file named {weights_name}" )
| 155
| 0
|
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path , config_file , pytorch_dump_path ) -> None:
    """simple docstring"""
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file )
    print(f"Building PyTorch model from configuration: {config}" )
    model = LxmertForPreTraining(config )

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model , config , tf_checkpoint_path )

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
_A : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_A : str = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
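    # Example invocation (an illustrative comment; the paths are hypothetical):
    #   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path ./lxmert/model.ckpt \
    #       --config_file ./lxmert/config.json \
    #       --pytorch_dump_path ./lxmert/pytorch_model.bin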
| 142
|
from typing import Any
class Node:
    def __init__( self , data: Any ) ->None:
        self.data = data
        self.next = None


class LinkedList:
    def __init__( self ) ->None:
        self.head = None

    def print_list( self ) ->None:
        temp = self.head
        while temp is not None:
            print(temp.data , end=''' ''' )
            temp = temp.next
        print()

    def push( self , new_data: Any ) ->None:
        new_node = Node(new_data )
        new_node.next = self.head
        self.head = new_node

    def swap_nodes( self , node_data_1 , node_data_2 ) ->None:
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        # Swap the payloads rather than relinking the nodes.
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print('After swapping')
    ll.print_list()
| 142
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowercase : Any = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Any = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__lowercase : str = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 358
|
'''simple docstring'''
def lowerCamelCase (number : int ):
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
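
    # Illustrative checks (an added sketch): bitwise AND with 1 isolates the
    # lowest bit, which is 0 exactly for even numbers.
    assert lowerCamelCase(4)
    assert not lowerCamelCase(7)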
| 294
| 0
|
"""simple docstring"""
def _A ( density : float, bulk_modulus : float) -> float:
'''simple docstring'''
if density <= 0:
raise ValueError("Impossible fluid density")
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus")
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
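
    # Illustrative check (an added sketch): density 1 kg/m^3 and bulk modulus
    # 4 Pa give sqrt(4 / 1) = 2.0 m/s.
    assert _A(1, 4) == 2.0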
| 17
|
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = '__DUMMY_TRANSFORMERS_USER__'
CI_HUB_USER_FULL_NAME = 'Dummy User'
CI_HUB_USER_TOKEN = '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'

CI_HUB_ENDPOINT = 'https://hub-ci.huggingface.co'
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + '/datasets/{repo_id}/resolve/{revision}/{path}'
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + '/{repo_id}/resolve/{revision}/{filename}'
CI_HUB_TOKEN_PATH = Path('~/.huggingface/hub_ci_token').expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    '''simple docstring'''
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE)


@pytest.fixture
def ci_hub_config(monkeypatch):
    '''simple docstring'''
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    '''simple docstring'''
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    '''simple docstring'''
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    '''simple docstring'''
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api: HfApi):
    '''simple docstring'''
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    '''simple docstring'''
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    '''simple docstring'''
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo
@pytest.fixture(scope="session")
def _A ( UpperCamelCase_ : HfApi, UpperCamelCase_ : str, UpperCamelCase_ : Optional[int]) -> List[Any]:
'''simple docstring'''
__lowercase = F"""repo_txt_data-{int(time.time() * 10E3)}"""
__lowercase = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset", private=UpperCamelCase_)
hf_api.upload_file(
token=UpperCamelCase_, path_or_fileobj=str(UpperCamelCase_), path_in_repo="data/text_data.txt", repo_id=UpperCamelCase_, repo_type="dataset", )
yield repo_id
try:
hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _A ( UpperCamelCase_ : Tuple, UpperCamelCase_ : Any, UpperCamelCase_ : Dict) -> Optional[int]:
'''simple docstring'''
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session")
def _A ( UpperCamelCase_ : HfApi, UpperCamelCase_ : int, UpperCamelCase_ : Optional[int]) -> int:
'''simple docstring'''
__lowercase = F"""repo_zipped_txt_data-{int(time.time() * 10E3)}"""
__lowercase = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset", private=UpperCamelCase_)
hf_api.upload_file(
token=UpperCamelCase_, path_or_fileobj=str(UpperCamelCase_), path_in_repo="data.zip", repo_id=UpperCamelCase_, repo_type="dataset", )
yield repo_id
try:
hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _A ( UpperCamelCase_ : List[str], UpperCamelCase_ : Dict, UpperCamelCase_ : Any) -> int:
'''simple docstring'''
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session")
def _A ( UpperCamelCase_ : HfApi, UpperCamelCase_ : List[str], UpperCamelCase_ : List[str]) -> List[Any]:
'''simple docstring'''
__lowercase = F"""repo_zipped_img_data-{int(time.time() * 10E3)}"""
__lowercase = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset", private=UpperCamelCase_)
hf_api.upload_file(
token=UpperCamelCase_, path_or_fileobj=str(UpperCamelCase_), path_in_repo="data.zip", repo_id=UpperCamelCase_, repo_type="dataset", )
yield repo_id
try:
hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _A ( UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[str], UpperCamelCase_ : List[str]) -> str:
'''simple docstring'''
return hf_private_dataset_repo_zipped_img_data_
| 17
| 1
|
"""simple docstring"""
class Node:
    def __init__( self , data: int , previous=None , next_node=None ) -> None:
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__( self ) -> str:
        return f"""{self.data}"""

    def get_data( self ) -> int:
        return self.data

    def get_next( self ):
        return self.next

    def get_previous( self ):
        return self.previous


class LinkedListIterator:
    def __init__( self , head ) -> None:
        self.current = head

    def __iter__( self ):
        return self

    def __next__( self ):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__( self ) -> None:
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__( self ) -> str:
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data() )
            current = current.get_next()
        return " ".join(str(node ) for node in nodes )

    def __contains__( self , value: int ) -> bool:
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__( self ):
        return LinkedListIterator(self.head )

    def get_head_data( self ):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data( self ):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head( self , node: Node ) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head , node )

    def set_tail( self , node: Node ) -> None:
        if self.head is None:
            self.set_head(node )
        else:
            self.insert_after_node(self.tail , node )

    def insert( self , value: int ) -> None:
        node = Node(value )
        if self.head is None:
            self.set_head(node )
        else:
            self.set_tail(node )

    def insert_before_node( self , node: Node , node_to_insert: Node ) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node( self , node: Node , node_to_insert: Node ) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert

    def insert_at_position( self , position: int , value: int ) -> None:
        current_position = 1
        new_node = Node(value )
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node , new_node )
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail , new_node )

    def get_node( self , item: int ) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found" )

    def delete_value( self , value ) -> None:
        if (node := self.get_node(value )) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node )

    @staticmethod
    def remove_node_pointers( node: Node ) -> None:
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty( self ) -> bool:
        return self.head is None


def create_linked_list() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
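
    # Illustrative usage (an added sketch): exercise the basic list operations.
    linked_list = LinkedList()
    for value in (1, 2, 3):
        linked_list.insert(value)
    print(linked_list)  # 1 2 3
    linked_list.delete_value(2)
    print(linked_list)  # 1 3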
| 163
|
"""simple docstring"""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
a_ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def random_subsample(wav: np.ndarray , max_length: float , sample_rate: int = 1_6000 ) -> np.ndarray:
    sample_length = int(round(sample_rate * max_length ) )
    if len(wav ) <= sample_length:
        return wav
    random_offset = randint(0 , len(wav ) - sample_length - 1 )
    return wav[random_offset : random_offset + sample_length]
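

# Illustrative usage (an added comment): keep at most 2 seconds of a 16 kHz
# waveform, cutting at a random offset.
#   clip = random_subsample(np.zeros(48000), max_length=2.0, sample_rate=16000)
#   assert len(clip) == 32000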
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(default=None , metadata={'help': 'Name of a dataset from the datasets package'})
    dataset_config_name: Optional[str] = field(
        default=None , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    train_file: Optional[str] = field(
        default=None , metadata={'help': 'A file containing the training audio paths and labels.'})
    eval_file: Optional[str] = field(
        default=None , metadata={'help': 'A file containing the validation audio paths and labels.'})
    train_split_name: str = field(
        default='train' , metadata={
            'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
        } , )
    eval_split_name: str = field(
        default='validation' , metadata={
            'help': (
                'The name of the training data set split to use (via the datasets library). Defaults to \'validation\''
            )
        } , )
    audio_column_name: str = field(
        default='audio' , metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} , )
    label_column_name: str = field(
        default='label' , metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''})
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )
    max_length_seconds: float = field(
        default=20 , metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} , )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default='facebook/wav2vec2-base' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
    config_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'})
    model_revision: str = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    feature_extractor_name: Optional[str] = field(
        default=None , metadata={'help': 'Name or path of preprocessor config.'})
    freeze_feature_encoder: bool = field(
        default=True , metadata={'help': 'Whether to freeze the feature encoder layers of the model.'})
    attention_mask: bool = field(
        default=True , metadata={'help': 'Whether to generate an attention mask in the feature extractor.'})
    use_auth_token: bool = field(
        default=False , metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
    freeze_feature_extractor: Optional[bool] = field(
        default=None , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
    ignore_mismatched_sizes: bool = field(
        default=False , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )

    def __post_init__(self ):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`." , FutureWarning , )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`." )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_audio_classification" , __lowercase , __lowercase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
_A = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_A = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
_A = DatasetDict()
_A = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
_A = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
"Make sure to set `--audio_column_name` to the correct audio column - one of "
f"""{', '.join(raw_datasets['train'].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
"Make sure to set `--label_column_name` to the correct text column - one of "
f"""{', '.join(raw_datasets['train'].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
_A = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
_A = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
_A = feature_extractor.model_input_names[0]
def train_transforms(__lowercase ):
_A = []
for audio in batch[data_args.audio_column_name]:
_A = random_subsample(
audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(__lowercase )
_A = feature_extractor(__lowercase , sampling_rate=feature_extractor.sampling_rate )
_A = {model_input_name: inputs.get(__lowercase )}
_A = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(__lowercase ):
_A = [audio["array"] for audio in batch[data_args.audio_column_name]]
_A = feature_extractor(__lowercase , sampling_rate=feature_extractor.sampling_rate )
_A = {model_input_name: inputs.get(__lowercase )}
_A = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
_A = raw_datasets["train"].features[data_args.label_column_name].names
_A , _A = {}, {}
for i, label in enumerate(__lowercase ):
_A = str(__lowercase )
_A = label
# Load the accuracy metric from the datasets package
_A = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(__lowercase ):
_A = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=__lowercase , references=eval_pred.label_ids )
_A = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(__lowercase ) , labelaid=__lowercase , idalabel=__lowercase , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_A = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
_A = (
raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(__lowercase , output_all_columns=__lowercase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
_A = (
raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(__lowercase , output_all_columns=__lowercase )
# Initialize our trainer
_A = Trainer(
model=__lowercase , args=__lowercase , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=__lowercase , tokenizer=__lowercase , )
# Training
if training_args.do_train:
_A = None
if training_args.resume_from_checkpoint is not None:
_A = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_A = last_checkpoint
_A = trainer.train(resume_from_checkpoint=__lowercase )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_A = trainer.evaluate()
trainer.log_metrics("eval" , __lowercase )
trainer.save_metrics("eval" , __lowercase )
# Write model card and (optionally) push to hub
_A = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "audio-classification",
"dataset": data_args.dataset_name,
"tags": ["audio-classification"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__lowercase )
else:
trainer.create_model_card(**__lowercase )
if __name__ == "__main__":
    main()
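# A hypothetical invocation of this script (dataset and model names are
# illustrative, not mandated by the script itself):
#
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb --dataset_config_name ks \
#       --output_dir ./wav2vec2-base-ft-keyword-spotting \
#       --do_train --do_eval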
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


# for checking whether the graph has an Euler path or circuit
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
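# For example, g1 above has exactly two odd-degree vertices (1 and 5), so
# check_euler reports a Euler path and dfs, started from the odd vertex 5,
# prints a traversal such as [5, 4, 1, 2, 3, 1].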
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '''
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
25.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
50.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
75.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results["exact_match"], 1))
100.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]
>>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
33.3
'''
_CITATION = '''
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    """Output class for the scheduler's step function output."""

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(self, sample, sigma, generator=None):
        # Increase the noise level of the sample if sigma is inside the churn range.
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(self, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=True):
        # first-order (Euler) step from sigma_hat down to sigma_prev
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(self, model_output, sigma_hat, sigma_prev, sample_hat, sample_prev, derivative, return_dict=True):
        # second-order (Heun-style) correction of the Euler step above
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
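# A rough sketch of how this scheduler is driven during sampling (`unet` is an
# assumed sigma-conditioned noise model, not part of this file):
#
#   scheduler = KarrasVeScheduler()
#   scheduler.set_timesteps(50)
#   sample = randn_tensor(shape) * scheduler.init_noise_sigma
#   for i in range(len(scheduler.schedule)):
#       sigma = scheduler.schedule[i]
#       sigma_prev = scheduler.schedule[i + 1] if i + 1 < len(scheduler.schedule) else 0
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
#       model_output = unet(sample_hat / 2 + 0.5, sigma_hat / 2).sample
#       sample = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat).prev_sample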
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
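# A hypothetical invocation of this script (paths are illustrative):
#
#   python <this_script>.py /path/to/fairseq/model.pt ./xglm-converted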
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
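# Dispatch sketch: each registered sub-command (e.g. `accelerate config`,
# `accelerate env`, `accelerate launch script.py`, `accelerate test`) sets a
# `func` default on its sub-parser, which `args.func(args)` above then invokes.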
import argparse
import json
from tqdm import tqdm
def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
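# Output format written by this script: `evaluation_set` holds one question per
# line, and the corresponding line of `gold_data_path` holds the tab-joined
# titles of that question's positive contexts.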
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/unispeech-sat-base-100h-libri-ft''': (
'''https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'''
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
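# Illustration of the property above: with the default conv_stride
# (5, 2, 2, 2, 2, 2, 2), inputs_to_logits_ratio = 5 * 2**6 = 320, i.e. the
# feature encoder emits one frame per 320 input samples.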
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
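# A hypothetical invocation (paths are illustrative):
#
#   python <this_script>.py --tf_checkpoint_path ./model.ckpt \
#       --config_file ./config.json --pytorch_dump_path ./pytorch_model.bin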
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
HIDEN_SIZE_MAPPING = {
'169M': 768,
'430M': 1024,
'1B5': 2048,
'3B': 2560,
'7B': 4096,
'14B': 5120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict
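# Example of the renaming above: "blocks.0.att.key.weight" becomes
# "rwkv.blocks.0.attention.key.weight", and "emb.weight" becomes
# "rwkv.embeddings.weight".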
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
    )
    parser.add_argument(
        "--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
    )
    parser.add_argument(
        "--tokenizer_file",
        default=None,
        type=str,
        help="Path to the tokenizer file to use (if not provided, only the model is converted).",
    )
    parser.add_argument(
        "--size",
        default=None,
        type=str,
        help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Push to the Hub the converted model.",
    )
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="Name of the pushed model on the Hub, including the username / organization.",
    )

    args = parser.parse_args()
    convert_rmkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
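# A hypothetical invocation (repo and file names are illustrative):
#
#   python <this_script>.py --repo_id BlinkDL/rwkv-4-pile-169m \
#       --checkpoint_file <checkpoint>.pth --output_dir ./rwkv-169m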
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
"""iou_prediction_head.layers.0""": """iou_prediction_head.proj_in""",
"""iou_prediction_head.layers.1""": """iou_prediction_head.layers.0""",
"""iou_prediction_head.layers.2""": """iou_prediction_head.proj_out""",
"""mask_decoder.output_upscaling.0""": """mask_decoder.upscale_conv1""",
"""mask_decoder.output_upscaling.1""": """mask_decoder.upscale_layer_norm""",
"""mask_decoder.output_upscaling.3""": """mask_decoder.upscale_conv2""",
"""mask_downscaling.0""": """mask_embed.conv1""",
"""mask_downscaling.1""": """mask_embed.layer_norm1""",
"""mask_downscaling.3""": """mask_embed.conv2""",
"""mask_downscaling.4""": """mask_embed.layer_norm2""",
"""mask_downscaling.6""": """mask_embed.conv3""",
"""point_embeddings""": """point_embed""",
"""pe_layer.positional_encoding_gaussian_matrix""": """shared_embedding.positional_embedding""",
"""image_encoder""": """vision_encoder""",
"""neck.0""": """neck.conv1""",
"""neck.1""": """neck.layer_norm1""",
"""neck.2""": """neck.conv2""",
"""neck.3""": """neck.layer_norm2""",
"""patch_embed.proj""": """patch_embed.projection""",
""".norm""": """.layer_norm""",
"""blocks""": """layers""",
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
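# Example of the mapping above: "image_encoder.patch_embed.proj.weight" becomes
# "vision_encoder.patch_embed.projection.weight", and
# "mask_decoder.output_hypernetworks_mlps.0.layers.2.weight" has its "layers.2"
# segment rewritten to "proj_out".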
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024,
            num_hidden_layers=24,
            num_attention_heads=16,
            global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280,
            num_hidden_layers=32,
            num_attention_heads=16,
            global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")

    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)

        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
    parser.add_argument(
        "--model_name",
        default="sam_vit_h_4b8939",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    parser.add_argument(
        "--model_hub_id",
        default="ybelkada/segment-anything",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )

    args = parser.parse_args()
    convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
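# A hypothetical invocation (model names come from the `choices` list above):
#
#   python <this_script>.py --model_name sam_vit_b_01ec64 \
#       --pytorch_dump_folder_path ./sam-vit-base --push_to_hub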
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch
    from torch import nn

    from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
    from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
    raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
USE_XLA = False
USE_AMP = False
def _UpperCamelCase ( UpperCamelCase__ ):
return TrainCommand(UpperCamelCase__ )
class _snake_case ( a__ ):
@staticmethod
def snake_case__ ( _lowerCamelCase):
UpperCAmelCase__ : Tuple = parser.add_parser("""train""" , help="""CLI tool to train a model on a task.""")
train_parser.add_argument(
"""--train_data""" , type=_lowerCamelCase , required=_lowerCamelCase , help="""path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.""" , )
train_parser.add_argument(
"""--column_label""" , type=_lowerCamelCase , default=0 , help="""Column of the dataset csv file with example labels.""")
train_parser.add_argument(
"""--column_text""" , type=_lowerCamelCase , default=1 , help="""Column of the dataset csv file with example texts.""")
train_parser.add_argument(
"""--column_id""" , type=_lowerCamelCase , default=2 , help="""Column of the dataset csv file with example ids.""")
train_parser.add_argument(
"""--skip_first_row""" , action="""store_true""" , help="""Skip the first row of the csv file (headers).""")
train_parser.add_argument("""--validation_data""" , type=_lowerCamelCase , default="""""" , help="""path to validation dataset.""")
train_parser.add_argument(
"""--validation_split""" , type=_lowerCamelCase , default=0.1 , help="""if validation dataset is not provided, fraction of train dataset to use as validation dataset.""" , )
train_parser.add_argument("""--output""" , type=_lowerCamelCase , default="""./""" , help="""path to saved the trained model.""")
train_parser.add_argument(
"""--task""" , type=_lowerCamelCase , default="""text_classification""" , help="""Task to train the model on.""")
train_parser.add_argument(
"""--model""" , type=_lowerCamelCase , default="""bert-base-uncased""" , help="""Model's name or path to stored model.""")
train_parser.add_argument("""--train_batch_size""" , type=_lowerCamelCase , default=32 , help="""Batch size for training.""")
train_parser.add_argument("""--valid_batch_size""" , type=_lowerCamelCase , default=64 , help="""Batch size for validation.""")
train_parser.add_argument("""--learning_rate""" , type=_lowerCamelCase , default=3e-5 , help="""Learning rate.""")
train_parser.add_argument("""--adam_epsilon""" , type=_lowerCamelCase , default=1e-0_8 , help="""Epsilon for Adam optimizer.""")
train_parser.set_defaults(func=_lowerCamelCase)
def __init__( self , _lowerCamelCase):
UpperCAmelCase__ : Dict = logging.get_logger("""transformers-cli/training""")
UpperCAmelCase__ : int = """tf""" if is_tf_available() else """torch"""
os.makedirs(args.output , exist_ok=_lowerCamelCase)
UpperCAmelCase__ : Tuple = args.output
UpperCAmelCase__ : Union[str, Any] = args.column_label
UpperCAmelCase__ : str = args.column_text
UpperCAmelCase__ : List[str] = args.column_id
self.logger.info(f'''Loading {args.task} pipeline for {args.model}''')
if args.task == "text_classification":
UpperCAmelCase__ : Optional[int] = TextClassificationPipeline.from_pretrained(args.model)
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f'''Loading dataset from {args.train_data}''')
UpperCAmelCase__ : Optional[int] = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
UpperCAmelCase__ : List[Any] = None
if args.validation_data:
self.logger.info(f'''Loading validation dataset from {args.validation_data}''')
UpperCAmelCase__ : List[str] = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
UpperCAmelCase__ : Tuple = args.validation_split
UpperCAmelCase__ : Dict = args.train_batch_size
UpperCAmelCase__ : Dict = args.valid_batch_size
UpperCAmelCase__ : Union[str, Any] = args.learning_rate
UpperCAmelCase__ : Tuple = args.adam_epsilon
def snake_case__ ( self):
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def snake_case__ ( self):
raise NotImplementedError
def snake_case__ ( self):
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output)
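# Hypothetical invocation of the subcommand registered above (flag names are taken
# from the argparse definition in this file; the `transformers-cli` entry point is
# assumed, not shown here):
#
#   transformers-cli train --train_data train.csv --task text_classification \
#       --model bert-base-uncased --output ./trained_model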
'''simple docstring'''
from __future__ import annotations


def encode(plain: str) -> list[int]:
    """Encode a lowercase string with the a1z26 cipher ('a' -> 1, ..., 'z' -> 26)."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Decode a list of a1z26 values back into a string."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
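# Round-trip sanity check for the cipher above (a1z26: 'a' -> 1, ..., 'z' -> 26).
# A minimal sketch; it runs at import time, which is acceptable for a script this small.
assert encode("hello") == [8, 5, 12, 12, 15]
assert decode([8, 5, 12, 12, 15]) == "hello"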
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def lowerCamelCase_ ( ):
'''simple docstring'''
UpperCamelCase__ = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
UpperCamelCase__ = Image.open(requests.get(snake_case__, stream=snake_case__ ).raw ).convert('''RGB''' )
return image
def lowerCamelCase_ ( UpperCamelCase__ : int ):
'''simple docstring'''
UpperCamelCase__ = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Dict, UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
UpperCamelCase__ = dct.pop(snake_case__ )
UpperCamelCase__ = val
def lowerCamelCase_ ( UpperCamelCase__ : str, UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
UpperCamelCase__ = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""" )
UpperCamelCase__ = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""" )
# next, set bias in the state dict
UpperCamelCase__ = torch.cat((q_bias, torch.zeros_like(snake_case__, requires_grad=snake_case__ ), v_bias) )
UpperCamelCase__ = qkv_bias
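# Illustration (not part of the conversion itself): the original checkpoint stores
# only query and value biases, so the fused qkv bias built above is laid out as
# [q_bias, zeros, v_bias]. The size used below is made up purely for demonstration.
_q_demo = torch.ones(3)
_v_demo = torch.full((3,), 2.0)
assert torch.cat((_q_demo, torch.zeros_like(_q_demo), _v_demo)).tolist() == [
    1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 2.0, 2.0, 2.0
]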
def lowerCamelCase_ ( UpperCamelCase__ : Tuple, UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
UpperCamelCase__ = 364 if 'coco' in model_name else 224
UpperCamelCase__ = BlipaVisionConfig(image_size=snake_case__ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
UpperCamelCase__ = OPTConfig.from_pretrained('''facebook/opt-2.7b''', eos_token_id=snake_case__ ).to_dict()
elif "opt-6.7b" in model_name:
UpperCamelCase__ = OPTConfig.from_pretrained('''facebook/opt-6.7b''', eos_token_id=snake_case__ ).to_dict()
elif "t5-xl" in model_name:
UpperCamelCase__ = TaConfig.from_pretrained('''google/flan-t5-xl''', dense_act_fn='''gelu''', bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
UpperCamelCase__ = TaConfig.from_pretrained('''google/flan-t5-xxl''', dense_act_fn='''gelu''', bos_token_id=1 ).to_dict()
UpperCamelCase__ = BlipaConfig(vision_config=snake_case__, text_config=snake_case__ )
return config, image_size
@torch.no_grad()
def lowerCamelCase_ ( UpperCamelCase__ : int, UpperCamelCase__ : Dict=None, UpperCamelCase__ : int=False ):
'''simple docstring'''
UpperCamelCase__ = (
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
UpperCamelCase__ = tokenizer('''\n''', add_special_tokens=snake_case__ ).input_ids[0]
UpperCamelCase__ = get_blipa_config(snake_case__, eos_token_id=snake_case__ )
UpperCamelCase__ = BlipaForConditionalGeneration(snake_case__ ).eval()
UpperCamelCase__ = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
UpperCamelCase__ = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
UpperCamelCase__ = 'cuda' if torch.cuda.is_available() else 'cpu'
UpperCamelCase__ = load_model_and_preprocess(
name=snake_case__, model_type=snake_case__, is_eval=snake_case__, device=snake_case__ )
original_model.eval()
print('''Done!''' )
# update state dict keys
UpperCamelCase__ = original_model.state_dict()
UpperCamelCase__ = create_rename_keys(snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__, snake_case__, snake_case__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
UpperCamelCase__ = state_dict.pop(snake_case__ )
if key.startswith('''Qformer.bert''' ):
UpperCamelCase__ = key.replace('''Qformer.bert''', '''qformer''' )
if "attention.self" in key:
UpperCamelCase__ = key.replace('''self''', '''attention''' )
if "opt_proj" in key:
UpperCamelCase__ = key.replace('''opt_proj''', '''language_projection''' )
if "t5_proj" in key:
UpperCamelCase__ = key.replace('''t5_proj''', '''language_projection''' )
if key.startswith('''opt''' ):
UpperCamelCase__ = key.replace('''opt''', '''language''' )
if key.startswith('''t5''' ):
UpperCamelCase__ = key.replace('''t5''', '''language''' )
UpperCamelCase__ = val
# read in qv biases
read_in_q_v_bias(snake_case__, snake_case__ )
UpperCamelCase__ = hf_model.load_state_dict(snake_case__, strict=snake_case__ )
assert len(snake_case__ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
UpperCamelCase__ = load_demo_image()
UpperCamelCase__ = vis_processors['eval'](snake_case__ ).unsqueeze(0 ).to(snake_case__ )
UpperCamelCase__ = tokenizer(['''\n'''], return_tensors='''pt''' ).input_ids.to(snake_case__ )
# create processor
UpperCamelCase__ = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size}, image_mean=snake_case__, image_std=snake_case__ )
UpperCamelCase__ = BlipaProcessor(image_processor=snake_case__, tokenizer=snake_case__ )
UpperCamelCase__ = processor(images=snake_case__, return_tensors='''pt''' ).pixel_values.to(snake_case__ )
# make sure processor creates exact same pixel values
assert torch.allclose(snake_case__, snake_case__ )
original_model.to(snake_case__ )
hf_model.to(snake_case__ )
with torch.no_grad():
if "opt" in model_name:
UpperCamelCase__ = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
UpperCamelCase__ = hf_model(snake_case__, snake_case__ ).logits
else:
UpperCamelCase__ = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
UpperCamelCase__ = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100 )
UpperCamelCase__ = hf_model(snake_case__, snake_case__, labels=snake_case__ ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''', original_logits[0, :3, :3] )
print('''First values of HF logits:''', logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
UpperCamelCase__ = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=snake_case__ )
assert torch.allclose(logits[0, :3, :3], snake_case__, atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
UpperCamelCase__ = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=snake_case__ )
else:
# cast to same type
UpperCamelCase__ = logits.dtype
assert torch.allclose(original_logits.to(snake_case__ ), snake_case__, atol=1e-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
UpperCamelCase__ = ''
UpperCamelCase__ = tokenizer(snake_case__, return_tensors='''pt''' ).input_ids.to(snake_case__ )
UpperCamelCase__ = original_model.generate({'''image''': original_pixel_values} )
UpperCamelCase__ = hf_model.generate(
snake_case__, snake_case__, do_sample=snake_case__, num_beams=5, max_length=30, min_length=1, top_p=0.9, repetition_penalty=1.0, length_penalty=1.0, temperature=1, )
print('''Original generation:''', snake_case__ )
UpperCamelCase__ = input_ids.shape[1]
UpperCamelCase__ = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=snake_case__ )
UpperCamelCase__ = [text.strip() for text in output_text]
print('''HF generation:''', snake_case__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(snake_case__ )
hf_model.save_pretrained(snake_case__ )
if push_to_hub:
processor.push_to_hub(F"""nielsr/{model_name}""" )
hf_model.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
lowercase = [
"""blip2-opt-2.7b""",
"""blip2-opt-6.7b""",
"""blip2-opt-2.7b-coco""",
"""blip2-opt-6.7b-coco""",
"""blip2-flan-t5-xl""",
"""blip2-flan-t5-xl-coco""",
"""blip2-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""blip2-opt-2.7b""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
lowercase = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
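# Hypothetical command line for the converter above (flag names are taken from the
# argparse definition in this file; the script filename and paths are placeholders):
#
#   python convert_blip2_to_hf.py --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b-hf --push_to_hub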
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
"""kakaobrain/align-base""": """https://huggingface.co/kakaobrain/align-base/resolve/main/config.json""",
}
class __lowercase ( A ):
'''simple docstring'''
_A : int = '''align_text_model'''
def __init__( self : Tuple , _a : Tuple=30_522 , _a : str=768 , _a : Tuple=12 , _a : Dict=12 , _a : Any=3_072 , _a : str="gelu" , _a : int=0.1 , _a : Optional[Any]=0.1 , _a : int=512 , _a : List[str]=2 , _a : Any=0.02 , _a : Dict=1E-12 , _a : Tuple=0 , _a : Optional[Any]="absolute" , _a : str=True , **_a : Union[str, Any] , ):
super().__init__(**_a )
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = hidden_act
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = position_embedding_type
UpperCamelCase__ = use_cache
UpperCamelCase__ = pad_token_id
@classmethod
def A_ ( cls : List[str] , _a : Union[str, os.PathLike] , **_a : Any ):
cls._set_token_in_kwargs(_a )
        config_dict , kwargs = cls.get_config_dict(_a , **_a )
# get the text config dict if we are loading from AlignConfig
if config_dict.get('''model_type''' ) == "align":
UpperCamelCase__ = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_a , **_a )
class __lowercase ( A ):
'''simple docstring'''
_A : List[Any] = '''align_vision_model'''
def __init__( self : List[str] , _a : int = 3 , _a : int = 600 , _a : float = 2.0 , _a : float = 3.1 , _a : int = 8 , _a : List[int] = [3, 3, 5, 3, 5, 5, 3] , _a : List[int] = [32, 16, 24, 40, 80, 112, 192] , _a : List[int] = [16, 24, 40, 80, 112, 192, 320] , _a : List[int] = [] , _a : List[int] = [1, 2, 2, 2, 1, 2, 1] , _a : List[int] = [1, 2, 2, 3, 3, 4, 1] , _a : List[int] = [1, 6, 6, 6, 6, 6, 6] , _a : float = 0.25 , _a : str = "swish" , _a : int = 2_560 , _a : str = "mean" , _a : float = 0.02 , _a : float = 0.001 , _a : float = 0.99 , _a : float = 0.2 , **_a : List[Any] , ):
super().__init__(**_a )
UpperCamelCase__ = num_channels
UpperCamelCase__ = image_size
UpperCamelCase__ = width_coefficient
UpperCamelCase__ = depth_coefficient
UpperCamelCase__ = depth_divisor
UpperCamelCase__ = kernel_sizes
UpperCamelCase__ = in_channels
UpperCamelCase__ = out_channels
UpperCamelCase__ = depthwise_padding
UpperCamelCase__ = strides
UpperCamelCase__ = num_block_repeats
UpperCamelCase__ = expand_ratios
UpperCamelCase__ = squeeze_expansion_ratio
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dim
UpperCamelCase__ = pooling_type
UpperCamelCase__ = initializer_range
UpperCamelCase__ = batch_norm_eps
UpperCamelCase__ = batch_norm_momentum
UpperCamelCase__ = drop_connect_rate
UpperCamelCase__ = sum(_a ) * 4
@classmethod
def A_ ( cls : Tuple , _a : Union[str, os.PathLike] , **_a : Union[str, Any] ):
cls._set_token_in_kwargs(_a )
        config_dict , kwargs = cls.get_config_dict(_a , **_a )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get('''model_type''' ) == "align":
UpperCamelCase__ = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_a , **_a )
class __lowercase ( A ):
'''simple docstring'''
_A : List[Any] = '''align'''
_A : Optional[int] = True
def __init__( self : Optional[int] , _a : Tuple=None , _a : int=None , _a : Any=640 , _a : Optional[Any]=1.0 , _a : Tuple=0.02 , **_a : List[Any] , ):
super().__init__(**_a )
if text_config is None:
UpperCamelCase__ = {}
logger.info('''text_config is None. Initializing the AlignTextConfig with default values.''' )
if vision_config is None:
UpperCamelCase__ = {}
logger.info('''vision_config is None. Initializing the AlignVisionConfig with default values.''' )
UpperCamelCase__ = AlignTextConfig(**_a )
UpperCamelCase__ = AlignVisionConfig(**_a )
UpperCamelCase__ = projection_dim
UpperCamelCase__ = temperature_init_value
UpperCamelCase__ = initializer_range
@classmethod
def A_ ( cls : Optional[int] , _a : AlignTextConfig , _a : AlignVisionConfig , **_a : Optional[Any] ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_a )
def A_ ( self : Tuple ):
UpperCamelCase__ = copy.deepcopy(self.__dict__ )
UpperCamelCase__ = self.text_config.to_dict()
UpperCamelCase__ = self.vision_config.to_dict()
UpperCamelCase__ = self.__class__.model_type
return output
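# Minimal composition sketch for the configs above (names as in the upstream
# `transformers` library -- this corpus renames the classes in-file):
#
#   text_config = AlignTextConfig()
#   vision_config = AlignVisionConfig()
#   config = AlignConfig.from_text_vision_configs(text_config, vision_config)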
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def lowercase_ ( _lowerCamelCase : Union[dict, list, tuple, torch.Tensor]):
lowercase__ : Any = []
if isinstance(lowerCAmelCase__ , lowerCAmelCase__):
for v in tree.values():
shapes.extend(_fetch_dims(lowerCAmelCase__))
elif isinstance(lowerCAmelCase__ , (list, tuple)):
for t in tree:
shapes.extend(_fetch_dims(lowerCAmelCase__))
elif isinstance(lowerCAmelCase__ , torch.Tensor):
shapes.append(tree.shape)
else:
raise ValueError("Not supported")
return shapes
@torch.jit.ignore
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : Tuple[int, ...]):
lowercase__ : Union[str, Any] = []
for d in reversed(lowerCAmelCase__):
idx.append(flat_idx % d)
lowercase__ : Tuple = flat_idx // d
return tuple(reversed(lowerCAmelCase__))
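# Worked example of the flat-index -> multi-index conversion above: with dims
# (2, 3), flat index 5 maps to (1, 2). Re-stated inline as a sketch because the
# corpus renames the function itself; the logic is identical.
def _flat_to_multi_sketch(flat_idx, dims):
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))


assert _flat_to_multi_sketch(5, (2, 3)) == (1, 2)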
@torch.jit.ignore
def lowercase_ ( _lowerCamelCase : Sequence[int] , _lowerCamelCase : Sequence[int] , _lowerCamelCase : Sequence[int] , _lowerCamelCase : Optional[Sequence[bool]] = None , _lowerCamelCase : Optional[Sequence[bool]] = None , ):
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(lowerCAmelCase__) == 0:
return [()]
elif len(lowerCAmelCase__) == 1:
return [(slice(start[0] , end[0] + 1),)]
lowercase__ : Tuple = []
lowercase__ : List[Any] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(lowerCAmelCase__ , lowerCAmelCase__):
if s == e:
path_list.append(slice(lowerCAmelCase__ , s + 1))
else:
break
lowercase__ : str = tuple(lowerCAmelCase__)
lowercase__ : str = len(lowerCAmelCase__)
# start == end, and we're done
if divergence_idx == len(lowerCAmelCase__):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
lowercase__ : List[Any] = start[divergence_idx]
return tuple(
path + (slice(lowerCAmelCase__ , sdi + 1),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ))
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
lowercase__ : Union[str, Any] = end[divergence_idx]
return tuple(
path + (slice(lowerCAmelCase__ , edi + 1),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ))
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1),))
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx]),))
slices.extend(lower())
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper())
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1),))
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper())
lowercase__ : List[Any] = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx]),))
slices.extend(lower())
return slices
@torch.jit.ignore
def lowercase_ ( _lowerCamelCase : torch.Tensor , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int):
lowercase__ : Dict = t.shape[:no_batch_dims]
lowercase__ : Optional[int] = list(_flat_idx_to_idx(lowerCAmelCase__ , lowerCAmelCase__))
# _get_minimal_slice_set is inclusive
lowercase__ : Union[str, Any] = list(_flat_idx_to_idx(flat_end - 1 , lowerCAmelCase__))
# Get an ordered list of slices to perform
lowercase__ : List[str] = _get_minimal_slice_set(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , )
lowercase__ : Optional[int] = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def lowercase_ ( _lowerCamelCase : Callable , _lowerCamelCase : Dict[str, Any] , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : bool = False , _lowerCamelCase : Any = None , _lowerCamelCase : bool = False , ):
if not (len(lowerCAmelCase__) > 0):
raise ValueError("Must provide at least one input")
lowercase__ : Any = [shape[:no_batch_dims] for shape in _fetch_dims(lowerCAmelCase__)]
    lowercase__ : Optional[int] = tuple([max(s ) for s in zip(*lowerCAmelCase__ )])
    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
lowercase__ : List[Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
lowercase__ : List[str] = t.reshape(-1 , *t.shape[no_batch_dims:])
else:
lowercase__ : Union[str, Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
return t
lowercase__ : List[Any] = tensor_tree_map(_prep_inputs , lowerCAmelCase__)
lowercase__ : List[Any] = None
if _out is not None:
        lowercase__ : Any = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])) , _out)
lowercase__ : Optional[Any] = 1
for d in orig_batch_dims:
flat_batch_dim *= d
lowercase__ : Optional[int] = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
lowercase__ : Any = 0
lowercase__ : Dict = prepped_outputs
for _ in range(lowerCAmelCase__):
# Chunk the input
if not low_mem:
lowercase__ : Tuple = _select_chunk
else:
lowercase__ : Tuple = partial(
_chunk_slice , flat_start=lowerCAmelCase__ , flat_end=min(lowerCAmelCase__ , i + chunk_size) , no_batch_dims=len(lowerCAmelCase__) , )
lowercase__ : int = tensor_tree_map(lowerCAmelCase__ , lowerCAmelCase__)
# Run the layer on the chunk
lowercase__ : Any = layer(**lowerCAmelCase__)
# Allocate space for the output
if out is None:
            lowercase__ : List[str] = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]) , lowerCAmelCase__)
# Put the chunk in its pre-allocated space
if isinstance(lowerCAmelCase__ , lowerCAmelCase__):
def assign(_lowerCamelCase : dict , _lowerCamelCase : dict) -> None:
for k, v in da.items():
if isinstance(lowerCAmelCase__ , lowerCAmelCase__):
assign(lowerCAmelCase__ , da[k])
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
lowercase__ : str = da[k]
assign(lowerCAmelCase__ , lowerCAmelCase__)
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__):
for xa, xa in zip(lowerCAmelCase__ , lowerCAmelCase__):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
lowercase__ : List[str] = xa
elif isinstance(lowerCAmelCase__ , torch.Tensor):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
lowercase__ : Any = output_chunk
else:
raise ValueError("Not supported")
i += chunk_size
    lowercase__ : Tuple = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]) , lowerCAmelCase__)
return out
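# The core pattern implemented above, restated as a tiny self-contained sketch:
# flatten the batch dimensions, run the layer chunk by chunk, and write each chunk
# into a preallocated output. Illustrative names only; assumes `fn` preserves shape.
def _chunked_apply_sketch(fn, x, chunk_size):
    out = torch.empty_like(x)
    for i in range(0, x.shape[0], chunk_size):
        out[i : i + chunk_size] = fn(x[i : i + chunk_size])
    return out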
class ChunkSizeTuner:
    def __init__(self, max_chunk_size: int = 512) -> None:
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size = None
        self.cached_arg_data = None

    def _determine_favorable_chunk_size(self, fn, args, min_chunk_size) -> int:
        logging.info("Tuning chunk size...")
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size
        candidates = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2
        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac_a, ac_b) -> bool:
        consistent = True
        for a_a, a_b in zip(ac_a, ac_b):
            assert type(a_a) == type(a_b)
            if isinstance(a_a, (list, tuple)):
                consistent &= self._compare_arg_caches(a_a, a_b)
            elif isinstance(a_a, dict):
                a_a_items = [v for _, v in sorted(a_a.items(), key=lambda x: x[0])]
                a_b_items = [v for _, v in sorted(a_b.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a_a_items, a_b_items)
            else:
                consistent &= a_a == a_b
        return consistent

    def tune_chunk_size(self, representative_fn, args, min_chunk_size) -> int:
        consistent = True
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False
        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn, args, min_chunk_size
            )
            self.cached_arg_data = arg_data
        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
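# Usage sketch for the tuner above: `fn` must accept a `chunk_size` keyword, and the
# tuner probes power-of-two sizes, keeping the largest that does not raise (e.g. OOM):
#
#   tuner = ChunkSizeTuner(max_chunk_size=512)
#   chunk_size = tuner.tune_chunk_size(fn, args, min_chunk_size=4)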
'''simple docstring'''
import numpy as np


class Cell:
    """A cell of the grid world, tracking position, parent and A* costs."""

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neighbours(self, cell):
        """Return the (up to 8) in-bounds neighbouring cells of `cell`."""
        neighbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    """A* search from `start` to `goal`; returns the path as a list of positions."""
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neighbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
"""simple docstring"""
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase_ ( _lowerCamelCase: Optional[int] , _lowerCamelCase: List[Any] , _lowerCamelCase: Optional[Any] ) -> str:
'''simple docstring'''
__lowerCamelCase : Any = TaConfig.from_json_file(_lowerCamelCase )
print(F"""Building PyTorch model from configuration: {config}""" )
__lowerCamelCase : Dict = TaForConditionalGeneration(_lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__A = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class _snake_case ( a__ ):
snake_case__ = "rwkv"
snake_case__ = {"max_position_embeddings": "context_length"}
def __init__( self : Optional[Any] , UpperCAmelCase : Union[str, Any]=50277 , UpperCAmelCase : Dict=1024 , UpperCAmelCase : int=4096 , UpperCAmelCase : Optional[Any]=32 , UpperCAmelCase : str=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : str=1E-5 , UpperCAmelCase : Optional[int]=0 , UpperCAmelCase : int=0 , UpperCAmelCase : Tuple=6 , UpperCAmelCase : Optional[Any]=False , UpperCAmelCase : List[Any]=True , **UpperCAmelCase : Any , ):
__lowerCamelCase : Optional[Any] = vocab_size
__lowerCamelCase : List[Any] = context_length
__lowerCamelCase : Optional[int] = hidden_size
__lowerCamelCase : Optional[Any] = num_hidden_layers
__lowerCamelCase : Tuple = attention_hidden_size if attention_hidden_size is not None else hidden_size
__lowerCamelCase : Dict = intermediate_size if intermediate_size is not None else 4 * hidden_size
__lowerCamelCase : str = layer_norm_epsilon
__lowerCamelCase : Dict = rescale_every
__lowerCamelCase : Optional[Any] = use_cache
__lowerCamelCase : int = bos_token_id
__lowerCamelCase : Tuple = eos_token_id
super().__init__(
tie_word_embeddings=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
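# Minimal usage sketch (the upstream class name is `RwkvConfig`; this corpus renames
# it in-file). With no arguments the defaults above apply, e.g. the attention hidden
# size falls back to `hidden_size`:
#
#   config = RwkvConfig()
#   assert config.attention_hidden_size == config.hidden_size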
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowerCamelCase :Union[str, Any] = {
'''configuration_swiftformer''': [
'''SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SwiftFormerConfig''',
'''SwiftFormerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Any = [
'''SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwiftFormerForImageClassification''',
'''SwiftFormerModel''',
'''SwiftFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
lowerCamelCase :Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
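# Note: the `_LazyModule` indirection above keeps importing the package cheap; the
# torch-backed classes are only materialised on first attribute access, e.g.
# (module path per the upstream layout, assumed here):
#
#   from transformers.models import swiftformer   # no torch import yet
#   swiftformer.SwiftFormerConfig                 # triggers the real import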
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar('''T''')


class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True):
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T):
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self

    def __repr__(self):
        return pformat(self.adj_list)
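# Minimal usage sketch for the repaired adjacency-list graph above:
if __name__ == "__main__":
    graph = GraphAdjacencyList(directed=False)
    graph.add_edge(1, 2)
    graph.add_edge(2, 3)
    print(graph)  # -> {1: [2], 2: [1, 3], 3: [2]}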
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : Any =logging.get_logger(__name__)
__lowerCAmelCase : Optional[Any] ={
"vocab_file": "vocab.txt",
"merges_file": "bpe.codes",
}
__lowerCAmelCase : int ={
"vocab_file": {
"vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
"vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
},
"merges_file": {
"vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
"vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
},
}
__lowerCAmelCase : List[str] ={
"vinai/phobert-base": 256,
"vinai/phobert-large": 256,
}
def UpperCamelCase ( _lowerCamelCase : Optional[Any] ):
A__ = set()
A__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A__ = char
A__ = set(_lowerCamelCase )
return pairs
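# Worked example of the symbol-pair extraction above (re-stated inline as a sketch,
# because the corpus renames the original function): adjacent-character pairs of a word.
def _get_pairs_sketch(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


assert _get_pairs_sketch("hello") == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}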
class UpperCAmelCase ( UpperCamelCase__ ):
__lowercase = VOCAB_FILES_NAMES
__lowercase = PRETRAINED_VOCAB_FILES_MAP
__lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self :str , lowercase_ :Optional[int] , lowercase_ :int , lowercase_ :Union[str, Any]="<s>" , lowercase_ :Tuple="</s>" , lowercase_ :Optional[int]="</s>" , lowercase_ :Dict="<s>" , lowercase_ :List[str]="<unk>" , lowercase_ :Union[str, Any]="<pad>" , lowercase_ :Dict="<mask>" , **lowercase_ :str , )-> int:
super().__init__(
bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , pad_token=lowercase_ , mask_token=lowercase_ , **lowercase_ , )
A__ = vocab_file
A__ = merges_file
A__ = {}
A__ = 0
A__ = 1
A__ = 2
A__ = 3
self.add_from_file(lowercase_ )
A__ = {v: k for k, v in self.encoder.items()}
with open(lowercase_ , encoding="utf-8" ) as merges_handle:
A__ = merges_handle.read().split("\n" )[:-1]
A__ = [tuple(merge.split()[:-1] ) for merge in merges]
A__ = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
A__ = {}
def UpperCAmelCase_ ( self :Any , lowercase_ :List[int] , lowercase_ :Optional[List[int]] = None )-> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A__ = [self.cls_token_id]
A__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
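    # Special-token layout produced by the method above (PhoBERT follows RoBERTa's scheme):
    #   single sequence:    <s> A </s>
    #   pair of sequences:  <s> A </s></s> B </s>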
def UpperCAmelCase_ ( self :Tuple , lowercase_ :List[int] , lowercase_ :Optional[List[int]] = None , lowercase_ :bool = False )-> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowercase_ )) + [1]
return [1] + ([0] * len(lowercase_ )) + [1, 1] + ([0] * len(lowercase_ )) + [1]
def UpperCAmelCase_ ( self :Optional[int] , lowercase_ :List[int] , lowercase_ :Optional[List[int]] = None )-> List[int]:
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCAmelCase_ ( self :Optional[int] )-> Any:
return len(self.encoder )
def UpperCAmelCase_ ( self :Optional[Any] )-> Optional[Any]:
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCAmelCase_ ( self :Optional[Any] , lowercase_ :List[str] )-> Optional[int]:
if token in self.cache:
return self.cache[token]
A__ = tuple(lowercase_ )
A__ = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
A__ = get_pairs(lowercase_ )
if not pairs:
return token
while True:
A__ = min(lowercase_ , key=lambda lowercase_ : self.bpe_ranks.get(lowercase_ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
            first, second = bigram
A__ = []
A__ = 0
while i < len(lowercase_ ):
try:
A__ = word.index(lowercase_ , lowercase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A__ = j
if word[i] == first and i < len(lowercase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A__ = tuple(lowercase_ )
A__ = new_word
if len(lowercase_ ) == 1:
break
else:
A__ = get_pairs(lowercase_ )
A__ = "@@ ".join(lowercase_ )
A__ = word[:-4]
A__ = word
return word
def UpperCAmelCase_ ( self :Optional[Any] , lowercase_ :str )-> Optional[Any]:
A__ = []
A__ = re.findall(R"\S+\n?" , lowercase_ )
for token in words:
split_tokens.extend(list(self.bpe(lowercase_ ).split(" " ) ) )
return split_tokens
def UpperCAmelCase_ ( self :List[Any] , lowercase_ :Optional[int] )-> str:
return self.encoder.get(lowercase_ , self.encoder.get(self.unk_token ) )
def UpperCAmelCase_ ( self :Tuple , lowercase_ :List[Any] )-> List[str]:
return self.decoder.get(lowercase_ , self.unk_token )
def UpperCAmelCase_ ( self :Union[str, Any] , lowercase_ :List[str] )-> Optional[Any]:
A__ = " ".join(lowercase_ ).replace("@@ " , "" ).strip()
return out_string
def UpperCAmelCase_ ( self :Union[str, Any] , lowercase_ :str , lowercase_ :Optional[str] = None )-> Tuple[str]:
if not os.path.isdir(lowercase_ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
A__ = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
A__ = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ):
copyfile(self.vocab_file , lowercase_ )
if os.path.abspath(self.merges_file ) != os.path.abspath(lowercase_ ):
copyfile(self.merges_file , lowercase_ )
return out_vocab_file, out_merge_file
def UpperCAmelCase_ ( self :Tuple , lowercase_ :Dict )-> Tuple:
if isinstance(lowercase_ , lowercase_ ):
try:
with open(lowercase_ , "r" , encoding="utf-8" ) as fd:
self.add_from_file(lowercase_ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(F"Incorrect encoding detected in {f}, please rebuild the dataset" )
return
A__ = f.readlines()
for lineTmp in lines:
A__ = lineTmp.strip()
A__ = line.rfind(" " )
if idx == -1:
raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" )
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Any =logging.get_logger(__name__)
def UpperCamelCase ( _lowerCamelCase : List[str] , _lowerCamelCase : Tuple=False ):
A__ = []
# fmt: off
# stem:
rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
# backbone
rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
A__ = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
# fmt: on
return rename_keys
def UpperCamelCase ( _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any]=False ):
for i in range(config.num_hidden_layers ):
if base_model:
A__ = ""
else:
A__ = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
A__ = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
A__ = in_proj_weight[
: config.hidden_size, :
]
A__ = in_proj_bias[: config.hidden_size]
A__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ = in_proj_weight[
-config.hidden_size :, :
]
A__ = in_proj_bias[-config.hidden_size :]
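# Shape sketch for the fused-qkv split above: timm stores one (3*hidden, hidden)
# projection, which is sliced into query/key/value blocks of (hidden, hidden) each.
# Dummy size, for illustration only:
_hidden_demo = 4
_qkv_demo = torch.zeros(3 * _hidden_demo, _hidden_demo)
assert _qkv_demo[: _hidden_demo].shape == (_hidden_demo, _hidden_demo)  # query
assert _qkv_demo[_hidden_demo : _hidden_demo * 2].shape == (_hidden_demo, _hidden_demo)  # key
assert _qkv_demo[-_hidden_demo:].shape == (_hidden_demo, _hidden_demo)  # value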
def UpperCamelCase ( _lowerCamelCase : Any ):
A__ = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def UpperCamelCase ( _lowerCamelCase : str , _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any] ):
A__ = dct.pop(_lowerCamelCase )
A__ = val
def UpperCamelCase ( ):
A__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
A__ = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def UpperCamelCase ( _lowerCamelCase : Dict , _lowerCamelCase : int , _lowerCamelCase : int=False ):
A__ = BitConfig(
global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] , embedding_dynamic_padding=_lowerCamelCase , )
A__ = ViTHybridConfig(backbone_config=_lowerCamelCase , image_size=3_84 , num_labels=10_00 )
A__ = False
# load original model from timm
A__ = timm.create_model(_lowerCamelCase , pretrained=_lowerCamelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
A__ = timm_model.state_dict()
if base_model:
remove_classification_head_(_lowerCamelCase )
A__ = create_rename_keys(_lowerCamelCase , _lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
A__ = "huggingface/label-files"
A__ = "imagenet-1k-id2label.json"
A__ = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) )
A__ = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
A__ = ViTHybridModel(_lowerCamelCase ).eval()
else:
A__ = ViTHybridForImageClassification(_lowerCamelCase ).eval()
model.load_state_dict(_lowerCamelCase )
# create image processor
A__ = create_transform(**resolve_data_config({} , model=_lowerCamelCase ) )
A__ = transform.transforms
A__ = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
A__ = ViTHybridImageProcessor(
do_resize=_lowerCamelCase , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_lowerCamelCase , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=_lowerCamelCase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
A__ = prepare_img()
A__ = transform(_lowerCamelCase ).unsqueeze(0 )
A__ = processor(_lowerCamelCase , return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
# verify logits
with torch.no_grad():
A__ = model(_lowerCamelCase )
A__ = outputs.logits
print("Predicted class:" , logits.argmax(-1 ).item() )
if base_model:
A__ = timm_model.forward_features(_lowerCamelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_lowerCamelCase , outputs.pooler_output , atol=1e-3 )
else:
A__ = timm_model(_lowerCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCamelCase , outputs.logits , atol=1e-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F"Saving model {vit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
print(F"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(_lowerCamelCase )
if push_to_hub:
print(F"Pushing model and processor to the hub {vit_name}" )
model.push_to_hub(F"ybelkada/{vit_name}" )
processor.push_to_hub(F"ybelkada/{vit_name}" )
if __name__ == "__main__":
__lowerCAmelCase : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
__lowerCAmelCase : Optional[Any] =parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
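# Hypothetical command line for the converter above (flags per the argparse
# definition; the script filename and output path are placeholders):
#
#   python convert_vit_hybrid_timm_to_pytorch.py --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base --push_to_hub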
def circle_sort(collection: list) -> list:
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection
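# Illustrative behaviour (not a doctest from the original file):
#   circle_sort([6, 1, 5, 2]) -> [1, 2, 5, 6]
# Each pass swaps out-of-order elements from the two ends of the range moving
# inwards, then recurses on both halves until a full pass performs no swap.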
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(circle_sort(unsorted))
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        """simple docstring"""

        @staticmethod
        def open(*args, **kwargs):
            """simple docstring"""
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
"""https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"""
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    """simple docstring"""
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        """simple docstring"""
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor)
        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        """simple docstring"""
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
@require_torch
    @require_detectron2
@require_pytesseract
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = pipeline("document-question-answering" , model="hf-internal-testing/tiny-random-layoutlmv2" )
lowerCamelCase_ = INVOICE_URL
lowerCamelCase_ = "How many cats are there?"
lowerCamelCase_ = [
{"score": 0.0_001, "answer": "oy 2312/2019", "start": 38, "end": 39},
{"score": 0.0_001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
]
lowerCamelCase_ = dqa_pipeline(image=UpperCamelCase , question=UpperCamelCase , top_k=2 )
self.assertEqual(nested_simplify(UpperCamelCase , decimals=4 ) , UpperCamelCase )
lowerCamelCase_ = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(nested_simplify(UpperCamelCase , decimals=4 ) , UpperCamelCase )
        # No text is detected in this image, so layoutlmv2 should fail;
        # an empty answer is expected.
lowerCamelCase_ = "./tests/fixtures/tests_samples/COCO/000000039769.png"
lowerCamelCase_ = dqa_pipeline(image=UpperCamelCase , question=UpperCamelCase , top_k=2 )
self.assertEqual(UpperCamelCase , [] )
        # We can optionally pass the words and bounding boxes directly
lowerCamelCase_ = "./tests/fixtures/tests_samples/COCO/000000039769.png"
lowerCamelCase_ = []
lowerCamelCase_ = []
lowerCamelCase_ = dqa_pipeline(image=UpperCamelCase , question=UpperCamelCase , words=UpperCamelCase , boxes=UpperCamelCase , top_k=2 )
self.assertEqual(UpperCamelCase , [] )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , )
lowerCamelCase_ = INVOICE_URL
lowerCamelCase_ = "What is the invoice number?"
lowerCamelCase_ = dqa_pipeline(image=UpperCamelCase , question=UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCamelCase_ = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCamelCase_ = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
[
{"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
],
]
* 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , )
lowerCamelCase_ = INVOICE_URL
lowerCamelCase_ = "What is the invoice number?"
lowerCamelCase_ = dqa_pipeline(image=UpperCamelCase , question=UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCamelCase_ = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCamelCase_ = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
[
{"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=UpperCamelCase )
lowerCamelCase_ = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=UpperCamelCase , revision="3dc6de3" , )
lowerCamelCase_ = INVOICE_URL
lowerCamelCase_ = "What is the invoice number?"
lowerCamelCase_ = dqa_pipeline(image=UpperCamelCase , question=UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
] , )
lowerCamelCase_ = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
] , )
lowerCamelCase_ = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
[
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
]
]
* 2 , )
lowerCamelCase_ = list(zip(*apply_tesseract(load_image(UpperCamelCase ) , UpperCamelCase , "" ) ) )
# This model should also work if `image` is set to None
lowerCamelCase_ = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=UpperCamelCase )
lowerCamelCase_ = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=UpperCamelCase , revision="3dc6de3" , max_seq_len=50 , )
lowerCamelCase_ = INVOICE_URL
lowerCamelCase_ = "What is the invoice number?"
lowerCamelCase_ = dqa_pipeline(image=UpperCamelCase , question=UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCamelCase_ = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
[
{"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
lowerCamelCase_ = list(zip(*apply_tesseract(load_image(UpperCamelCase ) , UpperCamelCase , "" ) ) )
# This model should also work if `image` is set to None
lowerCamelCase_ = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
] , )
@slow
@require_torch
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = pipeline(
"document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , )
lowerCamelCase_ = INVOICE_URL
lowerCamelCase_ = "What is the invoice number?"
lowerCamelCase_ = dqa_pipeline(image=UpperCamelCase , question=UpperCamelCase , top_k=2 )
self.assertEqual(nested_simplify(UpperCamelCase , decimals=4 ) , [{"answer": "us-001"}] )
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
def snake_case ( self ):
"""simple docstring"""
pass
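    # Note: the method above is intentionally a no-op; document question answering
    # has no TensorFlow implementation, so the test is skipped via @require_tf and
    # unittest.skip.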
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    """simple docstring"""

    def __init__(self, claim_vector, allocated_resources_table, maximum_claim_table) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation())

    def __need(self) -> list[list[int]]:
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print('_' * 50 + '\n')
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"""Process {process_number + 1} is executing.""")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number])
                    print(
                        'Updated available resource stack for processes: '
                        + ' '.join([str(x) for x in available_resources]))
                    break
            if safe:
                print('The process is in a safe state.\n')
            else:
                print('System in unsafe state. Aborting...\n')
                break

    def __pretty_data(self):
        print(' ' * 9 + 'Allocated Resource Table')
        for item in self.__allocated_resources_table:
            print(
                f"""P{self.__allocated_resources_table.index(item) + 1}"""
                + ' '.join(f"""{it:>8}""" for it in item)
                + '\n')
        print(' ' * 9 + 'System Resource Table')
        for item in self.__maximum_claim_table:
            print(
                f"""P{self.__maximum_claim_table.index(item) + 1}"""
                + ' '.join(f"""{it:>8}""" for it in item)
                + '\n')
        print(
            'Current Usage by Active Processes: '
            + ' '.join(str(x) for x in self.__claim_vector))
        print(
            'Initial Available Resources: '
            + ' '.join(str(x) for x in self.__available_resources()))
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
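    # Illustrative run with the module-level test data (an assumption, not part of
    # the original file; any truthy keyword makes main() print the tables first):
    BankersAlgorithm(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)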
"""simple docstring"""
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # conv/group norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                    f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""")

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
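# Illustrative usage (an assumption; requires a Flax model exposing `init_weights`,
# as the diffusers Flax models do, plus a matching PyTorch state dict):
#   flax_params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), flax_model)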
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """simple docstring"""
    j = 3
    primes = 3

    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
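# Illustrative checks (not doctests from the original file):
#   is_prime(563) -> True  (563 is prime)
#   is_prime(87)  -> False (87 == 3 * 29)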
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def floyd(n) -> None:
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n) -> None:
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n) -> None:
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- | |- |--| |\ /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : List[Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE : Optional[Any] = {
"vocab_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
),
"squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli": (
"https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE : Any = {
"squeezebert/squeezebert-uncased": 512,
"squeezebert/squeezebert-mnli": 512,
"squeezebert/squeezebert-mnli-headless": 512,
}
SCREAMING_SNAKE_CASE : List[Any] = {
"squeezebert/squeezebert-uncased": {"do_lower_case": True},
"squeezebert/squeezebert-mnli": {"do_lower_case": True},
"squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('''lowercase''', do_lower_case) != do_lower_case
            or normalizer_state.get('''strip_accents''', strip_accents) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('''type'''))
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
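# Illustrative usage (an assumption; downloads the pretrained vocabulary from the Hub):
#   tokenizer = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
#   tokenizer("Hello world")["input_ids"]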
from __future__ import annotations
from random import random
class Node:
    '''simple docstring'''

    def __init__(self, value=None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self):
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat(
                {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self):
        value = str(self.value) + ''' '''
        left = str(self.left or '''''')
        right = str(self.right or '''''')
        return value + left + right


def split(root, value) -> tuple[Node | None, Node | None]:
    '''simple docstring'''
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left, right) -> Node | None:
    '''simple docstring'''
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root, value) -> Node | None:
    '''simple docstring'''
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root, value) -> Node | None:
    '''simple docstring'''
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root) -> None:
    '''simple docstring'''
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=''',''')
        inorder(root.right)


def interact_treap(root, args) -> Node | None:
    '''simple docstring'''
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print('''Unknown command''')
    return root


def main() -> None:
    '''simple docstring'''
    root = None
    print(
        '''enter numbers to create a tree, + value to add value into treap, '''
        '''- value to erase all nodes with value. \'q\' to quit. ''')
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print('''good by!''')
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
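    # Illustrative session: entering "+1 +3 +5 +17 +19 +2 +16 +4 +0" inserts those
    # keys, "-1" then erases every node with value 1, and print(root) renders the
    # treap via Node.__repr__.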
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        '''simple docstring'''
        super().setUp()
        vocab_tokens = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """你""", """好""", """是""", """谁""", """a""", """b""", """c""", """d"""]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""word_shape_file"""])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""word_pronunciation_file"""])
        with open(self.vocab_file, """w""", encoding="""utf-8""") as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))
        with open(self.word_shape_file, """w""", encoding="""utf-8""") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, """w""", encoding="""utf-8""") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file )
_snake_case : str = tokenizer.tokenize("""你好[SEP]你是谁""" )
self.assertListEqual(a_, ["""你""", """好""", """[SEP]""", """你""", """是""", """谁"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ), [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(a_ ), [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(a_ ), [5, 6, 2, 5, 7, 8] )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case : Any = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ), ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Tuple = RoCBertBasicTokenizer(do_lower_case=a_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ), ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ), ["""hello"""] )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : List[str] = RoCBertBasicTokenizer(do_lower_case=a_, strip_accents=a_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ), ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ), ["""h\u00E9llo"""] )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Any = RoCBertBasicTokenizer(do_lower_case=a_, strip_accents=a_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ), ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ), ["""hello"""] )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=a_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ), ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ), ["""hello"""] )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Optional[int] = RoCBertBasicTokenizer(do_lower_case=a_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ), ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : str = RoCBertBasicTokenizer(do_lower_case=a_, strip_accents=a_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ), ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=a_, strip_accents=a_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ), ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case : str = RoCBertBasicTokenizer(do_lower_case=a_, never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ), ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
_snake_case : Optional[Any] = {}
for i, token in enumerate(a_ ):
_snake_case : Dict = i
_snake_case : Optional[Any] = RoCBertWordpieceTokenizer(vocab=a_, unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ), [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ), ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ), ["""[UNK]""", """runn""", """##ing"""] )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Optional[int] = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["""Test""", """\xad""", """test"""]], [["""[UNK]"""], [], ["""[UNK]"""]] )
if self.test_rust_tokenizer:
_snake_case : Optional[int] = self.get_rust_tokenizer()
self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["""Test""", """\xad""", """test"""]], [["""[UNK]"""], [], ["""[UNK]"""]] )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_snake_case : Optional[int] = self.rust_tokenizer_class.from_pretrained(a_, **a_ )
_snake_case : List[Any] = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
_snake_case : List[Any] = tokenizer_r.encode_plus(
a_, return_attention_mask=a_, return_token_type_ids=a_, return_offsets_mapping=a_, add_special_tokens=a_, )
_snake_case : Optional[Any] = tokenizer_r.do_lower_case if hasattr(a_, """do_lower_case""" ) else False
_snake_case : Optional[Any] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results], tokens["""offset_mapping"""] )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Union[str, Any] = ["""的""", """人""", """有"""]
_snake_case : Any = """""".join(a_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_snake_case : int = True
_snake_case : Tuple = self.tokenizer_class.from_pretrained(a_, **a_ )
_snake_case : List[Any] = self.rust_tokenizer_class.from_pretrained(a_, **a_ )
_snake_case : Optional[Any] = tokenizer_p.encode(a_, add_special_tokens=a_ )
_snake_case : int = tokenizer_r.encode(a_, add_special_tokens=a_ )
_snake_case : Optional[Any] = tokenizer_r.convert_ids_to_tokens(a_ )
_snake_case : Optional[int] = tokenizer_p.convert_ids_to_tokens(a_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(a_, a_ )
self.assertListEqual(a_, a_ )
_snake_case : List[str] = False
_snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(a_, **a_ )
_snake_case : str = self.tokenizer_class.from_pretrained(a_, **a_ )
_snake_case : Optional[Any] = tokenizer_r.encode(a_, add_special_tokens=a_ )
_snake_case : Any = tokenizer_p.encode(a_, add_special_tokens=a_ )
_snake_case : Optional[int] = tokenizer_r.convert_ids_to_tokens(a_ )
_snake_case : Tuple = tokenizer_p.convert_ids_to_tokens(a_ )
# it is expected that only the first Chinese character is not preceded by "##".
_snake_case : List[Any] = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(a_ )
]
self.assertListEqual(a_, a_ )
self.assertListEqual(a_, a_ )
@slow
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file )
_snake_case : Tuple = tokenizer.encode("""你好""", add_special_tokens=a_ )
_snake_case : Optional[int] = tokenizer.encode("""你是谁""", add_special_tokens=a_ )
_snake_case : str = tokenizer.build_inputs_with_special_tokens(a_ )
_snake_case : int = tokenizer.build_inputs_with_special_tokens(a_, a_ )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case : List[Any] = self.get_tokenizers(do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
_snake_case : int = """你好,你是谁"""
_snake_case : List[str] = tokenizer.tokenize(a_ )
_snake_case : List[str] = tokenizer.convert_tokens_to_ids(a_ )
_snake_case : List[Any] = tokenizer.convert_tokens_to_shape_ids(a_ )
_snake_case : Optional[Any] = tokenizer.convert_tokens_to_pronunciation_ids(a_ )
_snake_case : Dict = tokenizer.prepare_for_model(
a_, a_, a_, add_special_tokens=a_ )
_snake_case : Dict = tokenizer.encode_plus(a_, add_special_tokens=a_ )
self.assertEqual(a_, a_ )
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    """simple docstring"""
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 1_92
        config.intermediate_size = 7_68
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [8_00, 13_33]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 3_30
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 13_20
    elif "yolos_s" in yolos_name:
        config.hidden_size = 3_84
        config.intermediate_size = 15_36
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [8_00, 13_44]

    config.num_labels = 91
    repo_id = """huggingface/label-files"""
    filename = """coco-detection-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False):
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
_snake_case : Any = in_proj_weight[: config.hidden_size, :]
_snake_case : Optional[Any] = in_proj_bias[: config.hidden_size]
_snake_case : Optional[int] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_snake_case : int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_snake_case : Tuple = in_proj_weight[-config.hidden_size :, :]
_snake_case : List[Any] = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    """simple docstring"""
    if "backbone" in name:
        name = name.replace("""backbone""", """vit""")
    if "cls_token" in name:
        name = name.replace("""cls_token""", """embeddings.cls_token""")
    if "det_token" in name:
        name = name.replace("""det_token""", """embeddings.detection_tokens""")
    if "mid_pos_embed" in name:
        name = name.replace("""mid_pos_embed""", """encoder.mid_position_embeddings""")
    if "pos_embed" in name:
        name = name.replace("""pos_embed""", """embeddings.position_embeddings""")
    if "patch_embed.proj" in name:
        name = name.replace("""patch_embed.proj""", """embeddings.patch_embeddings.projection""")
    if "blocks" in name:
        name = name.replace("""blocks""", """encoder.layer""")
    if "attn.proj" in name:
        name = name.replace("""attn.proj""", """attention.output.dense""")
    if "attn" in name:
        name = name.replace("""attn""", """attention.self""")
    if "norm1" in name:
        name = name.replace("""norm1""", """layernorm_before""")
    if "norm2" in name:
        name = name.replace("""norm2""", """layernorm_after""")
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""", """intermediate.dense""")
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""", """output.dense""")
    if "class_embed" in name:
        name = name.replace("""class_embed""", """class_labels_classifier""")
    if "bbox_embed" in name:
        name = name.replace("""bbox_embed""", """bbox_predictor""")
    if "vit.norm" in name:
        name = name.replace("""vit.norm""", """vit.layernorm""")
    return name
def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(""".""")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
_snake_case : str = val[:dim, :]
_snake_case : Optional[Any] = val[
dim : dim * 2, :
]
_snake_case : Optional[Any] = val[-dim:, :]
else:
_snake_case : Dict = val[:dim]
_snake_case : Any = val[dim : dim * 2]
_snake_case : Dict = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
return orig_state_dict
def prepare_img():
    """simple docstring"""
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    """simple docstring"""
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="""cpu""")["""model"""]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 8_00 if yolos_name != """yolos_ti""" else 5_12
    image_processor = YolosImageProcessor(format="""coco_detection""", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="""pt""")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
[[-39.50_22, -11.98_20, -17.68_88], [-29.95_74, -9.97_69, -17.76_91], [-42.32_81, -20.72_00, -30.62_94]] )
        expected_slice_boxes = torch.tensor(
[[0.40_21, 0.08_36, 0.79_79], [0.01_84, 0.26_09, 0.03_64], [0.17_81, 0.20_04, 0.20_95]] )
elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
[[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] )
        expected_slice_boxes = torch.tensor(
[[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] )
elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
[[-36.22_20, -14.43_85, -23.54_57], [-35.69_70, -14.75_83, -21.39_35], [-31.59_39, -13.60_42, -16.80_49]] )
        expected_slice_boxes = torch.tensor(
[[0.76_14, 0.23_16, 0.47_28], [0.71_68, 0.44_95, 0.38_55], [0.49_96, 0.14_66, 0.99_96]] )
elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
[[-42.86_68, -24.10_49, -41.16_90], [-34.74_56, -14.12_74, -24.91_94], [-33.78_98, -12.19_46, -25.64_95]] )
        expected_slice_boxes = torch.tensor(
[[0.55_87, 0.27_73, 0.06_05], [0.50_04, 0.30_14, 0.99_94], [0.49_99, 0.15_48, 0.99_94]] )
elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
[[-40.60_64, -24.30_84, -32.64_47], [-55.19_90, -30.77_19, -35.58_77], [-51.43_11, -33.35_07, -35.64_62]] )
        expected_slice_boxes = torch.tensor(
[[0.55_55, 0.27_94, 0.06_55], [0.90_49, 0.26_64, 0.18_94], [0.91_83, 0.19_84, 0.16_35]] )
else:
raise ValueError(F"Unknown yolos_name: {yolos_name}" )
    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(F"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
        model_mapping = {
"""yolos_ti""": """yolos-tiny""",
"""yolos_s_200_pre""": """yolos-small""",
"""yolos_s_300_pre""": """yolos-small-300""",
"""yolos_s_dWr""": """yolos-small-dwr""",
"""yolos_base""": """yolos-base""",
}
print("""Pushing to the hub...""" )
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="""hustvl""")
        model.push_to_hub(model_name, organization="""hustvl""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
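# Example invocation (illustrative; the script filename is an assumption, and the
# original .pth checkpoint must be downloaded beforehand):
#   python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
#       --checkpoint_path ./yolos_s_200_pre.pth --pytorch_dump_folder_path ./yolos-small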
"""simple docstring"""
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list
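# Illustrative behaviour (not a doctest from the original file):
#   iter_merge_sort([9, 7, 4, 7]) -> [4, 7, 7, 9]
# The bottom-up variant merges runs whose width p doubles each pass, O(n log n) overall.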
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(',')]
    print(iter_merge_sort(unsorted))
"""simple docstring"""
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
_lowercase : int = logging.get_logger(__name__)
_lowercase : Tuple = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
_lowercase : Tuple = {
'vocab_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
},
'merges_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
},
}
_lowercase : str = {'allegro/herbert-base-cased': 5_14}
_lowercase : Tuple = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sep_token="</s>", **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, sep_token=sep_token, **kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
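# Illustrative usage (an assumption; downloads the pretrained vocabulary from the Hub):
#   tokenizer = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")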
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--repo_path''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()
    config_parameters_to_change = {
"image_size": "sample_size",
"num_res_blocks": "layers_per_block",
"block_channels": "block_out_channels",
"down_blocks": "down_block_types",
"up_blocks": "up_block_types",
"downscale_freq_shift": "freq_shift",
"resnet_num_groups": "norm_num_groups",
"resnet_act_fn": "act_fn",
"resnet_eps": "norm_eps",
"num_head_channels": "attention_head_dim",
}
    key_parameters_to_change = {
"time_steps": "time_proj",
"mid": "mid_block",
"downsample_blocks": "down_blocks",
"upsample_blocks": "up_blocks",
}
_UpperCamelCase = "" if has_file(args.repo_path, '''config.json''') else "unet"
with open(os.path.join(args.repo_path, subfolder, '''config.json'''), '''r''', encoding='''utf-8''') as reader:
_UpperCamelCase = reader.read()
_UpperCamelCase = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, '''config.json'''):
_UpperCamelCase = UNetaDModel(**config)
else:
_UpperCamelCase = UNetaDConditionModel if "ldm-text2im-large-256" in args.repo_path else UNetaDModel
_UpperCamelCase = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
_UpperCamelCase = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
_UpperCamelCase = config[key]
del config[key]
_UpperCamelCase = [k.replace('''UNetRes''', '''''') for k in config["down_block_types"]]
_UpperCamelCase = [k.replace('''UNetRes''', '''''') for k in config["up_block_types"]]
if do_only_weights:
_UpperCamelCase = torch.load(os.path.join(args.repo_path, subfolder, '''diffusion_pytorch_model.bin'''))
_UpperCamelCase = {}
for param_key, param_value in state_dict.items():
if param_key.endswith('''.op.bias''') or param_key.endswith('''.op.weight'''):
continue
_UpperCamelCase = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split('''.''')[0] == key:
_UpperCamelCase = param_value
_UpperCamelCase = True
if not has_changed:
_UpperCamelCase = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
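    # Example invocation (illustrative; the script filename is an assumption, and the
    # repo must contain config.json plus diffusion_pytorch_model.bin):
    #   python rename_unet_configs_and_weights.py --repo_path ./my-unet --dump_path ./my-unet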
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
config.vocab_size += 2
print(F'Saving tokenizer to {pytorch_dump_folder_path}' )
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
# Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
__snake_case : Optional[int] = state_dict[bias_name]
__snake_case : Any = decoder_bias[ent_init_index].unsqueeze(0 )
__snake_case : Union[str, Any] = decoder_bias[enta_init_index].unsqueeze(0 )
__snake_case : List[str] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
__snake_case : Optional[int] = F'encoder.layer.{layer_index}.attention.self.'
__snake_case : int = state_dict[prefix + matrix_name]
__snake_case : Optional[Any] = state_dict[prefix + matrix_name]
__snake_case : Optional[int] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
__snake_case : Union[str, Any] = state_dict["entity_embeddings.entity_embeddings.weight"]
__snake_case : Union[str, Any] = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
__snake_case : Union[str, Any] = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
__snake_case : List[Any] = state_dict["entity_predictions.bias"]
__snake_case : str = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
__snake_case : Optional[int] = torch.cat([entity_prediction_bias, entity_mask_bias] )
__snake_case : Any = LukeForMaskedLM(config=__lowerCamelCase ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor(
            [[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]]
        )

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
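# For illustration, load_original_entity_vocab() expects a JSON-lines file with one record per
# entity id; the entries below are made up, shown only to document the layout the loop parses:
#
#     {"id": 0, "entities": [["[PAD]", "none"]]}
#     {"id": 2, "entities": [["Japan", "en"], ["日本", "ja"]]}
#
# which the helper flattens to {"[PAD]": 0, "en:Japan": 2, "ja:日本": 2}.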
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
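# Example invocation (every path below is a placeholder for a local Studio Ousia mLUKE dump):
#
#     python convert_mluke_original_pytorch_checkpoint_to_pytorch.py \
#         --checkpoint_path ./mluke/pytorch_model.bin \
#         --metadata_path ./mluke/metadata.json \
#         --entity_vocab_path ./mluke/entity_vocab.jsonl \
#         --pytorch_dump_folder_path ./converted_mluke_base \
#         --model_size base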
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
    from transformers import (
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelForTableQuestionAnswering,
        TFAutoModelForTokenClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFFunnelBaseModel,
        TFFunnelModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
        TFTapasForQuestionAnswering,
    )
    from transformers.models.auto.modeling_tf_auto import (
        TF_MODEL_FOR_CAUSAL_LM_MAPPING,
        TF_MODEL_FOR_MASKED_LM_MAPPING,
        TF_MODEL_FOR_PRETRAINING_MAPPING,
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        TF_MODEL_MAPPING,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class TFAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModelForPreTraining.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

    @slow
    @require_tensorflow_probability
    def test_table_question_answering_model_from_pretrained(self):
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TapasConfig)

            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name)
            model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name, output_loading_info=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTapasForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_pretrained_with_tuple_values(self):
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        model = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
        self.assertIsInstance(model, TFFunnelModel)

        config = copy.deepcopy(model.config)
        config.architectures = ["FunnelBaseModel"]
        model = TFAutoModel.from_config(config)
        self.assertIsInstance(model, TFFunnelBaseModel)

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = TFAutoModel.from_pretrained(tmp_dir)
            self.assertIsInstance(model, TFFunnelBaseModel)

    def test_new_model_registration(self):
        try:
            AutoConfig.register("new-model", NewModelConfig)

            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]

            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFNewModel)
                    auto_class.register(NewModelConfig, TFNewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)

                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, TFNewModel)

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, TFNewModel)
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            model = TFAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            model = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin",
        ):
            model = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            model = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")

    def test_cached_model_has_minimum_calls_to_head(self):
        # Make sure we have cached the model.
        _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)

        # With a sharded checkpoint
        _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)
"""simple docstring"""
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    s = str(n)
    return len(s) == 9 and set(s) == set("123456789")


def solution() -> int | None:
    # 4-digit base numbers concatenated with their double: n * 10**5 + 2n = 100002 * n
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    # 3-digit base numbers concatenated with their double and triple: 1002003 * n
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
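    # Why the multipliers work: concatenating a 4-digit n with 2n (five digits for n >= 5000)
    # equals n * 10**5 + 2 * n = 100002 * n, and concatenating a 3-digit n with 2n and 3n
    # (each at most 3 digits for n <= 333) equals n * 10**6 + 2n * 10**3 + 3n = 1002003 * n.
    # Quick sanity check against the concatenated product of 9327:
    assert 100002 * 9327 == int(str(9327) + str(2 * 9327))  # 932718654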
import datasets
from .evaluate import evaluate
_CITATION = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
_KWARGS_DESCRIPTION = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CUAD(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
'''simple docstring'''
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
    parser.add_argument("--user", type=str, default="ubuntu")
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--key_path", type=str, default=None)
    parser.add_argument("--instance", type=str, default="V100:1")
    parser.add_argument("--provider", type=str, default="cheapest")
    parser.add_argument("--use_spot", type=bool, default=False)
    parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()

    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
    else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]

    # Set up remote environment
    cluster.install_packages(["pip:./"])  # Installs transformers from local source
    # Note transformers is copied into the home directory on the remote machine, so we can install from there
    cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
    cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])

    # Run example. You can bypass the CLI wrapper and paste your own code here.
    cluster.run([f"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]

if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
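# The assignment above replaces this module in sys.modules with a _LazyModule proxy, so the
# heavy torch-backed submodules are imported only when one of their symbols is first accessed.
# A minimal stand-alone sketch of the same idea via module-level __getattr__ (PEP 562); the
# module and attribute names here are hypothetical, not part of this library:
#
#     # mypackage/lazy.py
#     import importlib
#
#     _LAZY_ATTRS = {"CanineModel": ".modeling_canine"}
#
#     def __getattr__(name):
#         if name in _LAZY_ATTRS:
#             module = importlib.import_module(_LAZY_ATTRS[name], __package__)
#             return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")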
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
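# Note on the expected value: model(...).loss is the mean cross-entropy over the label tokens,
# so multiplying by labels.shape[-1] recovers the summed negative log-likelihood, and the sign
# flip matches the log-likelihood score convention of the original Mesh TensorFlow evaluation
# (hence the name mtf_score). For example, with 4 label tokens a mean loss of 21.228175 would
# give -(4 * 21.228175) = -84.9127; the token count here is illustrative, not measured.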
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs

    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
import argparse

import torch

from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct the model from a config file, or from the default config when none is given
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from the TensorFlow checkpoint (via numpy)
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
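    # Example invocation (paths are placeholders for a local OpenAI GPT-2 TF checkpoint):
    #
    #     python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
    #         --gpt2_checkpoint_path ./models/117M/model.ckpt \
    #         --pytorch_dump_folder_path ./gpt2-pytorch \
    #         --gpt2_config_file ./models/117M/config.json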
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is an mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
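    # For n = 4 and k = 2 the program prints each combination on its own line:
    #   1 2
    #   1 3
    #   1 4
    #   2 3
    #   2 4
    #   3 4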
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)


def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2


@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)


@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    """Returns the numerical id of the current `pytest-xdist` worker (e.g. "gw2" -> 2),
    or 0 when `pytest-xdist` is not in use."""
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """Returns a port unique to this particular `pytest-xdist` worker."""
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
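# Example: under pytest-xdist worker "gw3", pytest_xdist_worker_id() returns 3 and
# get_torch_dist_unique_port() returns 29500 + 3 = 29503, so concurrent workers that each
# launch a torch.distributed job do not collide on the default master port.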
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_mobilebert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_mobilebert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_mobilebert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
    def create_and_check_mobilebert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
    def create_and_check_mobilebert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_mobilebert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_mobilebert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_mobilebert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst, dtype=torch.long, device=torch_device, )


TOLERANCE = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
@slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ], device=torch_device, )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)
        self.assertTrue(lower_bound and upper_bound)
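
# A minimal sketch (not part of the original test) of the ratio-based check used
# above: when values span many orders of magnitude (~1e0 to 1e8 for MobileBERT),
# comparing the ratio expected/actual against 1 +/- tolerance is far more robust
# than comparing absolute differences.
def _ratio_close(expected, actual, tolerance=1E-3):
    ratio = expected / actual
    return bool(torch.all(ratio >= 1 - tolerance) and torch.all(ratio <= 1 + tolerance))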
| 86
| 0
|
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    """Check whether the passed module was compiled with `torch.compile()`."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed containers (DDP/DataParallel/DeepSpeed/compiled wrappers)."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model, options):
        model = model.module
    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
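
# Example usage (a sketch, assuming a distributed setup where `model` has already
# been wrapped in DistributedDataParallel): the bare module can be saved or
# inspected without its wrapper, and unwrapping is idempotent on plain modules.
#
#   ddp_model = torch.nn.parallel.DistributedDataParallel(model)
#   unwrapped = extract_model_from_parallel(ddp_model)
#   assert unwrapped is model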
def wait_for_everyone():
    """Introduce a blocking point in the script, making sure all processes have reached it."""
    PartialState().wait_for_everyone()
def save(obj, f):
    """Save `obj` to disk, using `xm.save` on TPU and saving only from the main process elsewhere."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)
@contextmanager
def patch_environment(**kwargs):
    """Temporarily set environment variables from `kwargs` (keys upper-cased), removing them on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
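
# Example usage (a sketch): variables exist only inside the block and are
# removed again on exit. Note that keys are upper-cased by the helper.
#
#   with patch_environment(cuda_visible_devices="0"):
#       assert os.environ["CUDA_VISIBLE_DEVICES"] == "0"
#   assert "CUDA_VISIBLE_DEVICES" not in os.environ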
def get_pretty_name(obj):
    """Return a human-readable name for `obj`, preferring `__qualname__`, then `__name__`, then `str()`."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)
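
# For example, names resolve in this order:
#   get_pretty_name(torch.nn.Linear)        -> "Linear"  (via __qualname__)
#   get_pretty_name(torch.nn.Linear(2, 2))  -> "Linear"  (falls back to the instance's class)
#   get_pretty_name(42)                     -> "int"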
def merge_dicts(source, destination):
    """Recursively merge the `source` dict into `destination`, creating nested keys as needed."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination
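
# Example (a sketch): nested keys from `source` are merged into `destination`
# without discarding existing siblings.
#
#   merge_dicts({"a": {"x": 1}}, {"a": {"y": 2}, "b": 3})
#   -> {"a": {"y": 2, "x": 1}, "b": 3}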
def is_port_in_use(port: int = None) -> bool:
    """Check whether a port is already in use on `localhost` (defaults to 29500, torch.distributed's default)."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
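
# Example usage (a sketch): fall back to another rendezvous port when the
# default one is already taken by a running training job.
#
#   port = 29500
#   while is_port_in_use(port):
#       port += 1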
| 198
|
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys that have no equivalent in the Hugging Face checkpoint."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """Build a bias-free linear layer that shares its weight with the given embedding."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
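
# A sketch of typical usage (not invoked by this script itself): turning the
# decoder's token embedding into an output projection so the LM head shares the
# embedding weights. Because the weight tensor is reassigned wholesale, the
# resulting layer maps hidden states of size `emb_size` to `vocab_size` logits.
#
#   lm_head = make_linear_from_emb(model.decoder.embed_tokens)
#   logits = lm_head(hidden_states)  # (batch, seq, vocab_size)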
def rename_fairseq_keys(state_dict, expert_idx=None):
    """Map fairseq MoE parameter names onto the Hugging Face NLLB-MoE naming scheme."""
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"""ffn.experts.expert_{expert_idx}""")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
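
# For example, the mapping above turns (a sketch with made-up layer indices):
#   "decoder.layers.3.moe_layer.experts.0.fc1.weight"   (expert_idx=7)
#     -> "decoder.layers.3.ffn.experts.expert_7.fc1.weight"
#   "decoder.layers.3.moe_layer.gate.wg.weight"
#     -> "decoder.layers.3.ffn.router.classifier.weight"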
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    """Convert the per-rank fairseq expert checkpoints into Hugging Face shards plus an index file."""
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)
    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"""-rank-{expert}.pt"""
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"""-{len(sharded_state_dicts)+1:05d}-of-???.bin"""))
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype)
    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"""-{len(sharded_state_dicts)+1:05d}-of-???.bin"""))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"""-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin""")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"""-{idx+1:05d}-of-???.bin"""))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)
    return metadata, index
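
# The index written above follows the standard sharded-checkpoint layout,
# e.g. (illustrative values only, not taken from a real conversion run):
#
#   {
#     "metadata": {"total_size": 123456789},
#     "weight_map": {"decoder.embed_tokens.weight": "pytorch_model-00129-of-00129.bin", ...}
#   }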
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
| 198
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    'configuration_longt5': ['LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongT5Config', 'LongT5OnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_longt5'] = [
        'LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST',
        'LongT5EncoderModel',
        'LongT5ForConditionalGeneration',
        'LongT5Model',
        'LongT5PreTrainedModel',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_longt5'] = [
        'FlaxLongT5ForConditionalGeneration',
        'FlaxLongT5Model',
        'FlaxLongT5PreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
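
# With the lazy-module setup above, `import transformers.models.longt5` stays
# cheap; the heavy submodules (torch/flax code) are imported only when one of
# their attributes is first accessed (a sketch):
#
#   from transformers.models import longt5   # no torch import happens here
#   model_cls = longt5.LongT5Model            # modeling_longt5 is imported now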
| 182
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['bs'], model_result['ss']):
                result = model_result['result'][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = 'sgugger/tiny-distilbert-classification'
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True, )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = 'patrickvonplaten/t5-tiny-random'
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU')) == 0, 'Cannot do xla on CPU.')
    def test_inference_no_configs_xla(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], use_xla=True, multi_process=False, )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, 'inf_time.csv'), inference_memory_csv_file=os.path.join(tmp_dir, 'inf_mem.csv'), env_info_csv_file=os.path.join(tmp_dir, 'env.csv'), multi_process=False, )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, 'inf_time.csv')).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, 'inf_mem.csv')).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, 'env.csv')).exists())

    def test_trace_memory(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, 'sequential'))
            self.assertTrue(hasattr(summary, 'cumulative'))
            self.assertTrue(hasattr(summary, 'current'))
            self.assertTrue(hasattr(summary, 'total'))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, 'log.txt'), log_print=True, trace_memory_line_by_line=True, eager_mode=True, multi_process=False, )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, 'log.txt')).exists())
| 182
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
"""DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DeiTForImageClassification""",
"""DeiTForImageClassificationWithTeacher""",
"""DeiTForMaskedImageModeling""",
"""DeiTModel""",
"""DeiTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
"""TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDeiTForImageClassification""",
"""TFDeiTForImageClassificationWithTeacher""",
"""TFDeiTForMaskedImageModeling""",
"""TFDeiTModel""",
"""TFDeiTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 30
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    """Entry point for the `accelerate` command-line interface."""
    parser = ArgumentParser("""Accelerate CLI tool""", usage="""accelerate <command> [<args>]""", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="""accelerate command helpers""")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, """func"""):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
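
# Examples of commands dispatched through this entry point:
#   accelerate config    # interactive setup, writes the default config file
#   accelerate launch train.py    # run a script under the configured distributed setup
#   accelerate env       # print environment information for bug reports
#   accelerate test      # sanity-check the current config with a small script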
| 30
| 1
|