The rows below are from a paired code-style dataset with this schema:

| column | type | range |
|---|---|---|
| code | string | lengths 86 to 54.5k |
| code_codestyle | int64 | 0 to 371 |
| style_context | string | lengths 87 to 49.2k |
| style_context_codestyle | int64 | 0 to 349 |
| label | int64 | 0 to 1 |
import qiskit


def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use CNOTs to write the XOR of the inputs on qubit 2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use a CCX (Toffoli) gate to write the AND of the inputs on qubit 3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value (the sum bit)
    qc_ha.measure(3, 1)  # extract AND value (the carry bit)
    # execute the circuit on the simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)
    # return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
[code_codestyle: 71]
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n, prec=1000) -> bool:
    """Miller-Rabin probabilistic primality test with `prec` random rounds."""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # n is odd: write n - 1 = d * (2 ** exp) with d odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
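The snippet imports `bin_exp_mod` from a sibling module. To run it standalone, a minimal stand-in, assuming the helper computes modular exponentiation, is:

```python
def bin_exp_mod(a, d, n):
    # fast (binary) modular exponentiation: (a ** d) % n
    return pow(a, d, n)
```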
[style_context_codestyle: 71, label: 1]
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}


class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(self, num_channels=3, image_size=224, depth_multiplier=1.0, min_depth=8, hidden_act="relu6",
                 tf_padding=True, classifier_dropout_prob=0.999, initializer_range=0.02, layer_norm_eps=0.001,
                 **kwargs):
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
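A minimal construction sketch (class names as published in 🤗 Transformers; the width settings below are illustrative):

```python
from transformers import MobileNetV1Config, MobileNetV1Model

# a width-multiplied variant in the spirit of 'mobilenet_v1_0.75_192'
config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
model = MobileNetV1Model(config)  # randomly initialized weights
print(config.hidden_act)  # relu6
```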
[code_codestyle: 365]
from __future__ import annotations

import unittest

from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel


@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
                 hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37,
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20,
                 eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2],
            bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None,
                              decoder_attention_mask=None, head_mask=None, decoder_head_mask=None,
                              cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture,
                                 tokenizer_name, processor_name):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
[style_context_codestyle: 301, label: 0]
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings

from .state import AcceleratorState, GradientState


warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")


class AcceleratedScheduler:
    def __init__(self, scheduler, optimizers, step_with_optimizer=True, split_batches=False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
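A minimal wiring sketch (illustrative names; in practice `Accelerator.prepare` performs this wrapping, and the gradient-accumulation branch above only engages once an `Accelerator` has initialized the shared state):

```python
import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)

# with step_with_optimizer=False the wrapper is a plain passthrough
scheduler = AcceleratedScheduler(lr_scheduler, optimizer, step_with_optimizer=False)
optimizer.step()
scheduler.step()
print(scheduler.get_last_lr())
```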
[code_codestyle: 163]
import os
from typing import List, Optional, Union

from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer


class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
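A loading sketch (the checkpoint id is one published InstructBLIP checkpoint and is assumed here, not taken from the row above):

```python
from PIL import Image
from transformers import InstructBlipProcessor

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
image = Image.new("RGB", (224, 224))
inputs = processor(images=image, text="What is in this image?", return_tensors="pt")
# besides pixel_values and the language-model input_ids/attention_mask, the
# processor adds qformer_input_ids / qformer_attention_mask as shown above
print(sorted(inputs.keys()))
```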
[style_context_codestyle: 163, label: 1]
from __future__ import annotations

import unittest

import numpy as np


def schur_complement(mat_a: np.ndarray, mat_b: np.ndarray, mat_c: np.ndarray,
                     pseudo_inv: np.ndarray | None = None) -> np.ndarray:
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError("Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b


class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        s = schur_complement(a, b, c)
        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
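For reference, the identity the first test exercises is the block-determinant factorization through the Schur complement:

```latex
\det\begin{pmatrix} A & B \\ B^{\mathsf{T}} & C \end{pmatrix}
  = \det(A)\,\det\!\left(C - B^{\mathsf{T}} A^{-1} B\right)
```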
[code_codestyle: 210]
def one_pence() -> int:
    return 1

def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()

def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)

def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)

def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)

def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)

def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)

def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)

def solution(x: int = 200) -> int:
    return two_pound(x)

if __name__ == "__main__":
    print(solution(int(input().strip())))
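This is the classic coin-sum recursion (Project Euler problem 31: ways to make £2 from British coins); a quick check against the commonly cited answer:

```python
assert solution(200) == 73682  # ways to make 200p from {1, 2, 5, 10, 20, 50, 100, 200} pence coins
```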
[style_context_codestyle: 210, label: 1]
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel

if is_vision_available():
    from transformers import MaskFormerImageProcessor

if is_vision_available():
    from PIL import Image


class MaskFormerModelTester:
    def __init__(self, parent, batch_size=2, is_training=True, use_auxiliary_loss=False, num_queries=10,
                 num_channels=3, min_size=32 * 4, max_size=32 * 6, num_labels=4, mask_feature_size=32):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(depths=[1, 1, 1, 1]),
            decoder_config=DetrConfig(decoder_ffn_dim=128, num_queries=self.num_queries,
                                      decoder_attention_heads=2, d_model=self.mask_feature_size),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)

    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)

            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask,
                           mask_labels=mask_labels, class_labels=class_labels)
            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=True)

    def test_output_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config)
        model.to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )

    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE)
        )

        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE)
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE)
        )

    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
[code_codestyle: 70]
from __future__ import annotations

from collections.abc import Callable
from typing import Any, Generic, TypeVar

T = TypeVar("T")


class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None

        self.N: int = len(arr)
        # 1-based segment tree stored in a flat list: leaves occupy [N, 2N)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N

        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res


if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """Check every (i, j) range against a brute-force reduce."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
[style_context_codestyle: 168, label: 0]
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = args.log_outputs
__SCREAMING_SNAKE_CASE = "_".join(args.dataset.split("/" ) + [args.config, args.split] )
# load metric
__SCREAMING_SNAKE_CASE = load_metric("wer" )
__SCREAMING_SNAKE_CASE = load_metric("cer" )
# compute metrics
__SCREAMING_SNAKE_CASE = wer.compute(references=result["target"] , predictions=result["prediction"] )
__SCREAMING_SNAKE_CASE = cer.compute(references=result["target"] , predictions=result["prediction"] )
# print & log results
__SCREAMING_SNAKE_CASE = f"""WER: {wer_result}\nCER: {cer_result}"""
print(lowerCAmelCase_ )
with open(f"""{dataset_id}_eval_results.txt""" , "w" ) as f:
f.write(lowerCAmelCase_ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
__SCREAMING_SNAKE_CASE = f"""log_{dataset_id}_predictions.txt"""
__SCREAMING_SNAKE_CASE = f"""log_{dataset_id}_targets.txt"""
with open(lowerCAmelCase_ , "w" ) as p, open(lowerCAmelCase_ , "w" ) as t:
# mapping function to write output
def write_to_file(lowerCAmelCase_ , lowerCAmelCase_ ):
p.write(f"""{i}""" + "\n" )
p.write(batch["prediction"] + "\n" )
t.write(f"""{i}""" + "\n" )
t.write(batch["target"] + "\n" )
result.map(lowerCAmelCase_ , with_indices=lowerCAmelCase_ )
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = "[,?.!\-\;\:\"“%‘”�—’…–]" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
__SCREAMING_SNAKE_CASE = re.sub(lowerCAmelCase_ , "" , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
__SCREAMING_SNAKE_CASE = ["\n\n", "\n", " ", " "]
for t in token_sequences_to_ignore:
__SCREAMING_SNAKE_CASE = " ".join(text.split(lowerCAmelCase_ ) )
return text
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=lowerCAmelCase_ )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
__SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained(args.model_id )
__SCREAMING_SNAKE_CASE = feature_extractor.sampling_rate
# resample audio
__SCREAMING_SNAKE_CASE = dataset.cast_column("audio" , Audio(sampling_rate=lowerCAmelCase_ ) )
# load eval pipeline
if args.device is None:
__SCREAMING_SNAKE_CASE = 0 if torch.cuda.is_available() else -1
__SCREAMING_SNAKE_CASE = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(lowerCAmelCase_ ):
__SCREAMING_SNAKE_CASE = asr(
batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
__SCREAMING_SNAKE_CASE = prediction["text"]
__SCREAMING_SNAKE_CASE = normalize_text(batch["sentence"] )
return batch
# run inference on all examples
__SCREAMING_SNAKE_CASE = dataset.map(lowerCAmelCase_ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(lowerCAmelCase_ , lowerCAmelCase_ )
if __name__ == "__main__":
a__ : List[str] = argparse.ArgumentParser()
parser.add_argument(
'''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
)
parser.add_argument(
'''--dataset''',
type=str,
required=True,
help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
)
parser.add_argument(
'''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
)
parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
parser.add_argument(
'''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
)
parser.add_argument(
'''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
)
parser.add_argument(
'''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
)
parser.add_argument(
'''--device''',
type=int,
default=None,
help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
)
a__ : Optional[Any] = parser.parse_args()
main(args)
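An example invocation, sketched as a comment (the model id is a placeholder and the dataset/config pair follows the Common Voice naming on 🤗 Datasets):

```python
# python eval.py \
#     --model_id <org>/<wav2vec2-model> \
#     --dataset mozilla-foundation/common_voice_8_0 --config pt --split test \
#     --chunk_length_s 5.0 --stride_length_s 1.0 \
#     --log_outputs
```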
[code_codestyle: 195]
"""simple docstring"""
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(lowerCAmelCase_ , lowerCAmelCase_ ) ) )
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
if dataset.ndim != value_array.ndim:
__SCREAMING_SNAKE_CASE = (
"Wrong input data's dimensions... "
f"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
)
raise ValueError(lowerCAmelCase_ )
try:
if dataset.shape[1] != value_array.shape[1]:
__SCREAMING_SNAKE_CASE = (
"Wrong input data's shape... "
f"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
)
raise ValueError(lowerCAmelCase_ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("Wrong shape" )
if dataset.dtype != value_array.dtype:
__SCREAMING_SNAKE_CASE = (
"Input data have different datatype... "
f"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
)
raise TypeError(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = []
for value in value_array:
__SCREAMING_SNAKE_CASE = euclidean(lowerCAmelCase_ , dataset[0] )
__SCREAMING_SNAKE_CASE = dataset[0].tolist()
for dataset_value in dataset[1:]:
__SCREAMING_SNAKE_CASE = euclidean(lowerCAmelCase_ , lowerCAmelCase_ )
if dist > temp_dist:
__SCREAMING_SNAKE_CASE = temp_dist
__SCREAMING_SNAKE_CASE = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
return np.dot(lowerCAmelCase_ , lowerCAmelCase_ ) / (norm(lowerCAmelCase_ ) * norm(lowerCAmelCase_ ))
if __name__ == "__main__":
import doctest
doctest.testmod()
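A worked example under the conventions above (rows are vectors; each query returns its nearest dataset row and the distance):

```python
import numpy as np

dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
value_array = np.array([[0.0, 1.0]])
print(similarity_search(dataset, value_array))  # [[[0.0, 0.0], 1.0]]
print(cosine_similarity(np.array([1.0, 2.0]), np.array([6.0, 32.0])))  # ~0.96
```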
[style_context_codestyle: 195, label: 1]
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = ['sentencepiece']
def __init__( self , *_a , **_a ):
requires_backends(self , ['''sentencepiece'''] )
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = ['sentencepiece']
def __init__( self , *_a , **_a ):
requires_backends(self , ['''sentencepiece'''] )
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = ['sentencepiece']
def __init__( self , *_a , **_a ):
requires_backends(self , ['''sentencepiece'''] )
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = ['sentencepiece']
def __init__( self , *_a , **_a ):
requires_backends(self , ['''sentencepiece'''] )
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = ['sentencepiece']
def __init__( self , *_a , **_a ):
requires_backends(self , ['''sentencepiece'''] )
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = ['sentencepiece']
def __init__( self , *_a , **_a ):
requires_backends(self , ['''sentencepiece'''] )
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = ['sentencepiece']
def __init__( self , *_a , **_a ):
requires_backends(self , ['''sentencepiece'''] )
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = ['sentencepiece']
def __init__( self , *_a , **_a ):
requires_backends(self , ['''sentencepiece'''] )
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = ['sentencepiece']
def __init__( self , *_a , **_a ):
requires_backends(self , ['''sentencepiece'''] )
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : str = ['sentencepiece']
def __init__( self , *_a , **_a ):
requires_backends(self , ['''sentencepiece'''] )
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : int = ['sentencepiece']
def __init__( self , *_a , **_a ):
requires_backends(self , ['''sentencepiece'''] )
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Dict = ['sentencepiece']
def __init__( self , *_a , **_a ):
requires_backends(self , ['''sentencepiece'''] )
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = ['sentencepiece']
def __init__( self , *_a , **_a ):
requires_backends(self , ['''sentencepiece'''] )
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Any = ['sentencepiece']
def __init__( self , *_a , **_a ):
requires_backends(self , ['''sentencepiece'''] )
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : str = ['sentencepiece']
def __init__( self , *_a , **_a ):
requires_backends(self , ['''sentencepiece'''] )
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = ['sentencepiece']
def __init__( self , *_a , **_a ):
requires_backends(self , ['''sentencepiece'''] )
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Any = ['sentencepiece']
def __init__( self , *_a , **_a ):
requires_backends(self , ['''sentencepiece'''] )
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = ['sentencepiece']
def __init__( self , *_a , **_a ):
requires_backends(self , ['''sentencepiece'''] )
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = ['sentencepiece']
def __init__( self , *_a , **_a ):
requires_backends(self , ['''sentencepiece'''] )
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : str = ['sentencepiece']
def __init__( self , *_a , **_a ):
requires_backends(self , ['''sentencepiece'''] )
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Any = ['sentencepiece']
def __init__( self , *_a , **_a ):
requires_backends(self , ['''sentencepiece'''] )
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = ['sentencepiece']
def __init__( self , *_a , **_a ):
requires_backends(self , ['''sentencepiece'''] )
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : str = ['sentencepiece']
def __init__( self , *_a , **_a ):
requires_backends(self , ['''sentencepiece'''] )
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : str = ['sentencepiece']
def __init__( self , *_a , **_a ):
requires_backends(self , ['''sentencepiece'''] )
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Dict = ['sentencepiece']
def __init__( self , *_a , **_a ):
requires_backends(self , ['''sentencepiece'''] )
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Dict = ['sentencepiece']
def __init__( self , *_a , **_a ):
requires_backends(self , ['''sentencepiece'''] )
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Dict = ['sentencepiece']
def __init__( self , *_a , **_a ):
requires_backends(self , ['''sentencepiece'''] )
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : int = ['sentencepiece']
def __init__( self , *_a , **_a ):
requires_backends(self , ['''sentencepiece'''] )
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[str] = ['sentencepiece']
def __init__( self , *_a , **_a ):
requires_backends(self , ['''sentencepiece'''] )
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : int = ['sentencepiece']
def __init__( self , *_a , **_a ):
requires_backends(self , ['''sentencepiece'''] )
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Any = ['sentencepiece']
def __init__( self , *_a , **_a ):
requires_backends(self , ['''sentencepiece'''] )
[code_codestyle: 45]
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCamelCase_ ( snake_case_ ):
'''simple docstring'''
lowerCAmelCase = ['''image_processor''', '''tokenizer''']
lowerCAmelCase = '''BlipImageProcessor'''
lowerCAmelCase = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self , a , a ) -> Tuple:
snake_case_ = False
super().__init__(a , a )
snake_case_ = self.image_processor
def __call__( self , a = None , a = None , a = True , a = False , a = None , a = None , a = 0 , a = None , a = None , a = False , a = False , a = False , a = False , a = False , a = True , a = None , **a , ) -> BatchEncoding:
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None:
snake_case_ = self.tokenizer
snake_case_ = self.tokenizer(
text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_token_type_ids=a , return_length=a , verbose=a , return_tensors=a , **a , )
return text_encoding
# add pixel_values
snake_case_ = self.image_processor(a , return_tensors=a )
if text is not None:
snake_case_ = self.tokenizer(
text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_token_type_ids=a , return_length=a , verbose=a , return_tensors=a , **a , )
else:
snake_case_ = None
if text_encoding is not None:
encoding_image_processor.update(a )
return encoding_image_processor
def _UpperCamelCase ( self , *a , **a ) -> int:
return self.tokenizer.batch_decode(*a , **a )
def _UpperCamelCase ( self , *a , **a ) -> Any:
return self.tokenizer.decode(*a , **a )
@property
def _UpperCamelCase ( self ) -> List[str]:
snake_case_ = self.tokenizer.model_input_names
snake_case_ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
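# --- Added usage sketch (not part of the original file) ---
# A minimal illustration of the processor above; the checkpoint name and
# image path are assumptions for illustration only, not pinned by this file.
#
#   from PIL import Image
#   from transformers import BlipProcessor
#
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=Image.open("cat.png"), text="a photo of", return_tensors="pt")
#   sorted(inputs.keys())  # pixel_values plus input_ids / attention_mask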
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/config.json""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/config.json""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"""
),
}
class XLMRobertaConfig ( PretrainedConfig ):
_lowerCamelCase :Any = "xlm-roberta"
    def __init__( self : int , vocab_size : Optional[int]=3_05_22 , hidden_size : Dict=7_68 , num_hidden_layers : str=12 , num_attention_heads : Dict=12 , intermediate_size : List[str]=30_72 , hidden_act : str="gelu" , hidden_dropout_prob : int=0.1 , attention_probs_dropout_prob : Dict=0.1 , max_position_embeddings : Optional[Any]=5_12 , type_vocab_size : Dict=2 , initializer_range : Optional[Any]=0.02 , layer_norm_eps : List[str]=1e-12 , pad_token_id : Any=1 , bos_token_id : Optional[Any]=0 , eos_token_id : Optional[int]=2 , position_embedding_type : Any="absolute" , use_cache : Tuple=True , classifier_dropout : Union[str, Any]=None , **kwargs : Union[str, Any] , ) -> Any:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
lowerCAmelCase__ : Optional[int] = vocab_size
lowerCAmelCase__ : Any = hidden_size
lowerCAmelCase__ : Any = num_hidden_layers
lowerCAmelCase__ : str = num_attention_heads
lowerCAmelCase__ : Tuple = hidden_act
lowerCAmelCase__ : Dict = intermediate_size
lowerCAmelCase__ : int = hidden_dropout_prob
lowerCAmelCase__ : int = attention_probs_dropout_prob
lowerCAmelCase__ : Union[str, Any] = max_position_embeddings
lowerCAmelCase__ : List[Any] = type_vocab_size
lowerCAmelCase__ : Union[str, Any] = initializer_range
lowerCAmelCase__ : Union[str, Any] = layer_norm_eps
lowerCAmelCase__ : int = position_embedding_type
lowerCAmelCase__ : int = use_cache
lowerCAmelCase__ : Optional[Any] = classifier_dropout
class XLMRobertaOnnxConfig ( OnnxConfig ):
    @property
    def inputs( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
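# --- Added usage sketch (not part of the original file) ---
# How the two classes above fit together, assuming the restored names: the
# ONNX config reports the dynamic axes of the inputs the exported model expects.
#
#   config = XLMRobertaConfig()                   # defaults above
#   onnx_config = XLMRobertaOnnxConfig(config)    # task defaults to "default"
#   onnx_config.inputs
#   # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#   #              ('attention_mask', {0: 'batch', 1: 'sequence'})])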
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True )
class AudioClassification ( TaskTemplate ):
    task : str = field(default="audio-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema : ClassVar[Features] = Features({"audio": Audio()} )
    label_schema : ClassVar[Features] = Features({"labels": ClassLabel} )
    audio_column : str = "audio"
    label_column : str = "labels"
    def align_with_features( self : str , features : Optional[int] ) -> Union[str, Any]:
        """simple docstring"""
        if self.label_column not in features:
            raise ValueError(f"""Column {self.label_column} is not present in features.""" )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template
@property
    def column_mapping( self : List[Any] ) -> Dict[str, str]:
"""simple docstring"""
return {
self.audio_column: "audio",
self.label_column: "labels",
}
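# --- Added usage sketch (not part of the original file) ---
# align_with_features() swaps the bare ClassLabel placeholder in the label
# schema for the dataset's actual, materialised ClassLabel:
#
#   features = Features({"audio": Audio(), "labels": ClassLabel(names=["cat", "dog"])})
#   task = AudioClassification().align_with_features(features)
#   task.column_mapping   # {'audio': 'audio', 'labels': 'labels'}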
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class FalconConfig ( PretrainedConfig ):
"""simple docstring"""
    model_type = '''falcon'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    def __init__( self , vocab_size=6_5_0_2_4 , hidden_size=4_5_4_4 , num_hidden_layers=3_2 , num_attention_heads=7_1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , hidden_dropout=0.0 , attention_dropout=0.0 , num_kv_heads=None , alibi=False , new_decoder_architecture=False , multi_query=True , parallel_attn=True , bias=False , bos_token_id=1_1 , eos_token_id=1_1 , **kwargs , ) -> Tuple:
        """simple docstring"""
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed" , None )
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
@property
    def head_dim( self : Any ) -> int:
"""simple docstring"""
return self.hidden_size // self.num_attention_heads
@property
    def rotary( self : Union[str, Any] ) -> Any:
"""simple docstring"""
return not self.alibi
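# Added note (derived from the defaults above): for the falcon-7b defaults,
# head_dim is 4544 // 71 = 64, and `rotary` is True because `alibi`
# defaults to False.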
"""simple docstring"""
from math import pi, sqrt
def gamma (num ):
    if num <= 0:
        raise ValueError("""math domain error""" )
    if num > 171.5:
        raise OverflowError("""math range error""" )
    elif num - int(num ) not in (0, 0.5):
        raise NotImplementedError("""num must be an integer or a half-integer""" )
    elif num == 0.5:
        return sqrt(pi )
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def test_gamma ():
    assert gamma(0.5 ) == sqrt(pi )
    assert gamma(1 ) == 1.0
    assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
SCREAMING_SNAKE_CASE_ = 1.0
while num:
SCREAMING_SNAKE_CASE_ = float(input('''Gamma of: '''))
print(F"gamma({num}) = {gamma(num)}")
print('''\nEnter 0 to exit...''')
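# --- Added worked example (not part of the original file) ---
# The recurrence above unwinds half-integers down to Gamma(0.5) = sqrt(pi),
# e.g. gamma(2.5) = 1.5 * 0.5 * sqrt(pi) ~= 1.3293, and integers down to
# Gamma(n) = (n - 1)!, e.g. gamma(5) = 4! = 24.0.
#
#   from math import isclose
#   assert isclose(gamma(2.5), 1.5 * 0.5 * sqrt(pi))
#   assert gamma(5) == 24.0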
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_UpperCAmelCase : List[Any] = """Run commands across TPU VMs for initial setup before running `accelerate launch`."""
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase=None ) -> Any:
if subparsers is not None:
lowerCamelCase__ : str = subparsers.add_parser('tpu-config' , description=_description )
else:
lowerCamelCase__ : List[str] = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
# Core arguments
lowerCamelCase__ : int = parser.add_argument_group(
'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
config_args.add_argument(
'--config_file' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='Path to the config file to use for accelerate.' , )
config_args.add_argument(
'--tpu_name' , default=_UpperCAmelCase , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
config_args.add_argument(
'--tpu_zone' , default=_UpperCAmelCase , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
lowerCamelCase__ : List[str] = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
pod_args.add_argument(
'--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
pod_args.add_argument(
'--command_file' , default=_UpperCAmelCase , help='The path to the file containing the commands to run on the pod on startup.' , )
pod_args.add_argument(
'--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
pod_args.add_argument(
'--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
pod_args.add_argument(
'--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
pod_args.add_argument(
'--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
if subparsers is not None:
parser.set_defaults(func=_UpperCAmelCase )
return parser
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Optional[Any]:
lowerCamelCase__ : Optional[int] = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(_UpperCAmelCase ):
lowerCamelCase__ : int = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
lowerCamelCase__ : Any = defaults.command_file
if not args.command and defaults.commands is not None:
lowerCamelCase__ : Optional[Any] = defaults.commands
if not args.tpu_name:
lowerCamelCase__ : int = defaults.tpu_name
if not args.tpu_zone:
lowerCamelCase__ : Tuple = defaults.tpu_zone
if args.accelerate_version == "dev":
lowerCamelCase__ : Optional[int] = 'git+https://github.com/huggingface/accelerate.git'
elif args.accelerate_version == "latest":
lowerCamelCase__ : Union[str, Any] = 'accelerate -U'
elif isinstance(parse(args.accelerate_version ) , _UpperCAmelCase ):
lowerCamelCase__ : List[Any] = F"""accelerate=={args.accelerate_version}"""
if not args.command_file and not args.command:
raise ValueError('You must specify either a command file or a command to run on the pod.' )
if args.command_file:
with open(args.command_file , 'r' ) as f:
lowerCamelCase__ : List[str] = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , _UpperCAmelCase ):
lowerCamelCase__ : Tuple = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
lowerCamelCase__ : Tuple = ['cd /usr/share']
if args.install_accelerate:
new_cmd += [F"""pip install {args.accelerate_version}"""]
new_cmd += args.command
lowerCamelCase__ : Optional[int] = '; '.join(_UpperCAmelCase )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
lowerCamelCase__ : Tuple = ['gcloud']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F"""Running {" ".join(_UpperCAmelCase )}""" )
return
subprocess.run(_UpperCAmelCase )
print('Successfully setup pod.' )
def SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
lowerCamelCase__ : Union[str, Any] = tpu_command_parser()
lowerCamelCase__ : Optional[Any] = parser.parse_args()
tpu_command_launcher(_UpperCAmelCase )
from __future__ import annotations
def kmp( pattern , text ) -> bool:
    failure = get_failure_array(pattern )
    # 2) Step through text searching for pattern
    i , j = 0, 0  # index into text, pattern
    while i < len(text ):
        if pattern[j] == text[i]:
            if j == (len(pattern ) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def get_failure_array( pattern ) -> list[int]:
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern ):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i )
    return failure
if __name__ == "__main__":
# Test 1)
_UpperCAmelCase : Union[str, Any] = """abc1abc12"""
_UpperCAmelCase : List[Any] = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
_UpperCAmelCase : Dict = """alskfjaldsk23adsfabcabc"""
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
_UpperCAmelCase : Any = """ABABX"""
_UpperCAmelCase : Union[str, Any] = """ABABZABABYABABX"""
assert kmp(pattern, text)
# Test 3)
_UpperCAmelCase : int = """AAAB"""
_UpperCAmelCase : str = """ABAAAAAB"""
assert kmp(pattern, text)
# Test 4)
_UpperCAmelCase : Optional[Any] = """abcdabcy"""
_UpperCAmelCase : List[Any] = """abcxabcdabxabcdabcdabcy"""
assert kmp(pattern, text)
# Test 5)
_UpperCAmelCase : str = """aabaabaaa"""
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
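    # Added worked trace for Test 5): each failure[j] is the length of the
    # longest proper prefix of pattern[:j + 1] that is also its suffix:
    #     a  a  b  a  a  b  a  a  a
    #     0  1  0  1  2  3  4  5  2
    # At the final 'a' the length-5 border "aabaa" cannot be extended
    # (pattern[5] is 'b'), nor can the fallback failure[4] = 2 border "aa"
    # (pattern[2] is 'b'); falling back again to failure[1] = 1 lets
    # pattern[1] == 'a' match, giving failure[8] = 2.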
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
__a : List[Any] = """"""
if version.parse(importlib_metadata.version("""jiwer""")) < version.parse("""2.3.0"""):
class _UpperCamelCase ( tr.AbstractTransform ):
"""simple docstring"""
def __init__( self , lowerCAmelCase__ = " " ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = sentence_delimiter
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
return list(lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
__lowercase = []
for sent_idx, sentence in enumerate(lowerCAmelCase__ ):
chars.extend(self.process_string(lowerCAmelCase__ ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(lowerCAmelCase__ ) - 1:
chars.append(self.sentence_delimiter )
return chars
    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
__a : str = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
__a : List[str] = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""
__a : Optional[int] = """
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
    predictions: list of transcriptions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> cer = datasets.load_metric(\"cer\")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class CER ( datasets.Metric ):
"""simple docstring"""
    def _info( self ) -> Any:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
'''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
] , )
    def _compute( self , predictions , references , concatenate_texts=False ) -> List[str]:
        '''simple docstring'''
        if concatenate_texts:
            return jiwer.compute_measures(
                references , predictions , truth_transform=cer_transform , hypothesis_transform=cer_transform , )["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions , references ):
            measures = jiwer.compute_measures(
                reference , prediction , truth_transform=cer_transform , hypothesis_transform=cer_transform , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
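# --- Added worked example for the docstring above (not original code) ---
# The documented score 0.34146341463414637 is exactly 14 / 41: fourteen
# character substitutions/deletions/insertions accumulated over the two
# pairs, divided by the 41 reference characters ("this is the reference"
# has 21 characters counting spaces, "there is another one" has 20).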
import warnings
from functools import wraps
from typing import Callable
def experimental( fn ):
    """simple docstring"""
    @wraps(fn )
    def _inner_fn(*args , **kwargs ):
        warnings.warn(
            (F"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.") , UserWarning , )
        return fn(*args , **kwargs )
    return _inner_fn
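# --- Added usage sketch (not part of the original file) ---
#
#   @experimental
#   def fancy_feature(x):
#       return x * 2
#
#   fancy_feature(3)   # returns 6, after emitting:
#   # UserWarning: 'fancy_feature' is experimental and might be subject to
#   # breaking changes in the future.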
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack ( list ):
    '''simple docstring'''
    def __lt__( self : List[Any] , other : List[Any] ):
        return self[-1] < other[-1]
    def __eq__( self : Dict , other : int ):
        return self[-1] == other[-1]
def patience_sort( collection : list ) -> list:
    stacks = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element] )
        i = bisect_left(stacks , new_stack )
        if i != len(stacks ):
            stacks[i].append(element )
        else:
            stacks.append(new_stack )
    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack ) for stack in stacks) )
    return collection
if __name__ == "__main__":
lowerCamelCase__ : Optional[int] = input('Enter numbers separated by a comma:\n').strip()
lowerCamelCase__ : Union[str, Any] = [int(item) for item in user_input.split(',')]
print(patience_sort(unsorted))
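# --- Added worked example (not part of the original file) ---
# For [5, 1, 4, 2, 3] the decreasing piles grow as:
#   5 -> [5]
#   1 -> placed on [5]        piles: [5, 1]
#   4 -> new pile             piles: [5, 1], [4]
#   2 -> placed on [4]        piles: [5, 1], [4, 2]
#   3 -> new pile             piles: [5, 1], [4, 2], [3]
# Reversing each pile and k-way merging yields [1, 2, 3, 4, 5].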
def UpperCAmelCase_ ( __UpperCAmelCase : int , __UpperCAmelCase : int ) -> int:
return int((input_a, input_a).count(0 ) != 0 )
def UpperCAmelCase_ ( ) -> None:
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
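# Added note: NAND is NOT(AND); by De Morgan's law nand(a, b) equals
# (not a) or (not b), which the tuple trick above encodes as
# "at least one of the two inputs is 0".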
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UpperCAmelCase = {
'''Salesforce/blip-vqa-base''': '''https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json''',
'''Salesforce/blip-vqa-capfit-large''': (
'''https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'''
),
'''Salesforce/blip-image-captioning-base''': (
'''https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'''
),
'''Salesforce/blip-image-captioning-large''': (
'''https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'''
),
'''Salesforce/blip-itm-base-coco''': '''https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json''',
'''Salesforce/blip-itm-large-coco''': '''https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json''',
'''Salesforce/blip-itm-base-flikr''': '''https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json''',
'''Salesforce/blip-itm-large-flikr''': (
'''https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'''
),
}
class BlipTextConfig ( PretrainedConfig ):
'''simple docstring'''
_UpperCamelCase : str = """blip_text_model"""
    def __init__( self , vocab_size=3_0524 , hidden_size=768 , encoder_hidden_size=768 , intermediate_size=3072 , projection_dim=768 , num_hidden_layers=12 , num_attention_heads=8 , max_position_embeddings=512 , hidden_act="gelu" , layer_norm_eps=1E-12 , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , bos_token_id=3_0522 , eos_token_id=2 , pad_token_id=0 , sep_token_id=102 , is_decoder=True , use_cache=True , **kwargs , ):
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , sep_token_id=sep_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from BlipConfig
        if config_dict.get('model_type' ) == "blip":
            config_dict = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class BlipVisionConfig ( PretrainedConfig ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = """blip_vision_model"""
    def __init__( self , hidden_size=768 , intermediate_size=3072 , projection_dim=512 , num_hidden_layers=12 , num_attention_heads=12 , image_size=384 , patch_size=16 , hidden_act="gelu" , layer_norm_eps=1E-5 , attention_dropout=0.0 , initializer_range=1E-10 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get('model_type' ) == "blip":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class BlipConfig ( PretrainedConfig ):
'''simple docstring'''
_UpperCamelCase : str = """blip"""
_UpperCamelCase : Any = True
    def __init__( self , text_config=None , vision_config=None , projection_dim=512 , logit_scale_init_value=2.6_592 , image_text_hidden_size=256 , **kwargs , ):
        super().__init__(**kwargs )
        if text_config is None:
            text_config = {}
            logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.' )
        if vision_config is None:
            vision_config = {}
            logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.' )
        self.text_config = BlipTextConfig(**text_config )
        self.vision_config = BlipVisionConfig(**vision_config )
        self.text_config.encoder_hidden_size = self.vision_config.hidden_size
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size
@classmethod
    def from_text_vision_configs( cls , text_config , vision_config , **kwargs ):
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UpperCAmelCase = {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''',
}
class XLNetConfig ( PretrainedConfig ):
'''simple docstring'''
_UpperCamelCase : Tuple = """xlnet"""
_UpperCamelCase : Optional[Any] = ["""mems"""]
_UpperCamelCase : Tuple = {
"""n_token""": """vocab_size""", # Backward compatibility
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__( self , vocab_size=3_2000 , d_model=1024 , n_layer=24 , n_head=16 , d_inner=4096 , ff_activation="gelu" , untie_r=True , attn_type="bi" , initializer_range=0.02 , layer_norm_eps=1E-12 , dropout=0.1 , mem_len=512 , reuse_len=None , use_mems_eval=True , use_mems_train=False , bi_data=False , clamp_len=-1 , same_length=False , summary_type="last" , summary_use_proj=True , summary_activation="tanh" , summary_last_dropout=0.1 , start_n_top=5 , end_n_top=5 , pad_token_id=5 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
lowercase = vocab_size
lowercase = d_model
lowercase = n_layer
lowercase = n_head
if d_model % n_head != 0:
raise ValueError(F'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
F'''`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})''' )
lowercase = d_model // n_head
lowercase = ff_activation
lowercase = d_inner
lowercase = untie_r
lowercase = attn_type
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = dropout
lowercase = mem_len
lowercase = reuse_len
lowercase = bi_data
lowercase = clamp_len
lowercase = same_length
lowercase = summary_type
lowercase = summary_use_proj
lowercase = summary_activation
lowercase = summary_last_dropout
lowercase = start_n_top
lowercase = end_n_top
lowercase = bos_token_id
lowercase = pad_token_id
lowercase = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'
' instead.' , snake_case , )
            use_mems_eval = kwargs['use_cache']
lowercase = use_mems_eval
lowercase = use_mems_train
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
@property
    def max_position_embeddings( self ):
logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
    def max_position_embeddings( self , value ):
# Message copied from Transformer-XL documentation
raise NotImplementedError(
F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
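# Added note (derived from the defaults above): XLNet has no fixed context
# window, so `max_position_embeddings` reports -1 and its setter refuses
# writes; with the defaults above `d_head` is derived as
# d_model // n_head = 1024 // 16 = 64.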
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field( default=None , metadata=None ):
    """simple docstring"""
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class snake_case__:
"""simple docstring"""
    models: List[str] = list_field(
        default=[] , metadata={
            """help""": (
                """Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"""
                """ of all available models"""
            )
        } , )
    batch_sizes: List[int] = list_field(
        default=[8] , metadata={"""help""": """List of batch sizes for which memory and time performance will be evaluated"""} )
    sequence_lengths: List[int] = list_field(
        default=[8, 3_2, 1_2_8, 5_1_2] , metadata={"""help""": """List of sequence lengths for which memory and time performance will be evaluated"""} , )
    inference: bool = field(
        default=True , metadata={"""help""": """Whether to benchmark inference of model. Inference can be disabled via --no-inference."""} , )
    cuda: bool = field(
        default=True , metadata={"""help""": """Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."""} , )
    tpu: bool = field(
        default=True , metadata={"""help""": """Whether to run on available tpu devices. TPU can be disabled via --no-tpu."""} )
    fp16: bool = field(default=False , metadata={"""help""": """Use FP16 to accelerate inference."""} )
    training: bool = field(default=False , metadata={"""help""": """Benchmark training of model"""} )
    verbose: bool = field(default=False , metadata={"""help""": """Verbose memory tracing"""} )
    speed: bool = field(
        default=True , metadata={"""help""": """Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."""} , )
    memory: bool = field(
        default=True , metadata={
            """help""": """Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"""
        } , )
    trace_memory_line_by_line: bool = field(default=False , metadata={"""help""": """Trace memory line by line"""} )
    save_to_csv: bool = field(default=False , metadata={"""help""": """Save result to a CSV file"""} )
    log_print: bool = field(default=False , metadata={"""help""": """Save all print statements in a log file"""} )
    env_print: bool = field(default=False , metadata={"""help""": """Whether to print environment information"""} )
    multi_process: bool = field(
        default=True , metadata={
            """help""": (
                """Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"""
                """ multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"""
                """ for debugging / testing and on TPU."""
            )
        } , )
    inference_time_csv_file: str = field(
        default=F'''inference_time_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving time results to csv."""} , )
    inference_memory_csv_file: str = field(
        default=F'''inference_memory_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving memory results to csv."""} , )
    train_time_csv_file: str = field(
        default=F'''train_time_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving time results to csv for training."""} , )
    train_memory_csv_file: str = field(
        default=F'''train_memory_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving memory results to csv for training."""} , )
    env_info_csv_file: str = field(
        default=F'''env_info_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving environment information."""} , )
    log_filename: str = field(
        default=F'''log_{round(time() )}.csv''' , metadata={"""help""": """Log filename used if print statements are saved in log."""} , )
    repeat: int = field(default=3 , metadata={"""help""": """Times an experiment will be run."""} )
    only_pretrain_model: bool = field(
        default=False , metadata={
            """help""": (
                """Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"""
                """ model weights."""
            )
        } , )
    def __post_init__( self : List[Any] ):
warnings.warn(
f"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
" are deprecated in general and it is advised to use external Benchmarking libraries "
" to benchmark Transformer models." , SCREAMING_SNAKE_CASE , )
    def to_json_string( self : Dict ):
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
    def model_names( self : Union[str, Any] ):
if len(self.models ) <= 0:
raise ValueError(
"Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
" bert-base-cased` or `args.models = ['bert-base-cased']." )
return self.models
@property
    def do_multi_processing( self : Dict ):
if not self.multi_process:
return False
elif self.is_tpu:
logger.info("Multiprocessing is currently not possible on TPU." )
return False
else:
return True
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
lowerCAmelCase__ = 1.054571817e-34 # unit of ℏ : J * s
lowerCAmelCase__ = 3e8 # unit of c : m * s^-1
def casimir_force( force: float , area: float , distance: float ) -> dict[str, float]:
"""simple docstring"""
if (force, area, distance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if force < 0:
raise ValueError("Magnitude of force can not be negative" )
if distance < 0:
raise ValueError("Distance can not be negative" )
if area < 0:
raise ValueError("Area can not be negative" )
if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
        area = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
        distance = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError("One and only one argument must be 0" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
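# --- Added worked example (not part of the original file) ---
# For two 4 m^2 plates one micrometre apart, F = (hbar * c * pi^2 * A) / (240 * d^4):
#
#   casimir_force(force=0, area=4, distance=1e-6)
#   # {'force': 0.00520...}  (about 5.2 mN of attraction)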
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + '/test_data/fsmt/fsmt_val_data.json'
with io.open(filename, 'r', encoding='utf-8') as f:
    bleu_data = json.load(f)
@require_torch
class snake_case ( unittest.TestCase ):
    def get_tokenizer( self , mname) ->Dict:
        return FSMTTokenizer.from_pretrained(mname)
    def get_model( self , mname) ->str:
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
["en-ru", 26.0],
["ru-en", 22.0],
["en-de", 22.0],
["de-en", 29.0],
])
@slow
    def test_bleu_scores( self , pair , min_bleu) ->Dict:
        mname = F'''facebook/wmt19-{pair}'''
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)
        src_sentences = bleu_data[pair]['src']
        tgt_sentences = bleu_data[pair]['tgt']
        batch = tokenizer(src_sentences , return_tensors="pt" , truncation=True , padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids , num_beams=8 , )
        decoded_sentences = tokenizer.batch_decode(
            outputs , skip_special_tokens=True , clean_up_tokenization_spaces=False)
        scores = calculate_bleu(decoded_sentences , tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"] , min_bleu)
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results( result , args ) -> List[str]:
    log_outputs = args.log_outputs
    dataset_id = '_'.join(args.dataset.split('/' ) + [args.config, args.split] )
    # load metric
    wer = load_metric('wer' )
    cer = load_metric('cer' )
    # compute metrics
    wer_result = wer.compute(references=result['target'] , predictions=result['prediction'] )
    cer_result = cer.compute(references=result['target'] , predictions=result['prediction'] )
    # print & log results
    result_str = F'''WER: {wer_result}\nCER: {cer_result}'''
    print(result_str )
    with open(F'''{dataset_id}_eval_results.txt''' , 'w' ) as f:
        f.write(result_str )
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = F'''log_{dataset_id}_predictions.txt'''
        target_file = F'''log_{dataset_id}_targets.txt'''
        with open(pred_file , 'w' ) as p, open(target_file , 'w' ) as t:
            # mapping function to write output
            def write_to_file(batch , i ):
                p.write(F'''{i}''' + '\n' )
                p.write(batch['prediction'] + '\n' )
                t.write(F'''{i}''' + '\n' )
                t.write(batch['target'] + '\n' )
            result.map(write_to_file , with_indices=True )
def normalize_text( text ) -> str:
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex , '' , text.lower() )
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['\n\n', '\n', '   ', '  ']
    for t in token_sequences_to_ignore:
        text = ' '.join(text.split(t ) )
    return text
def main( args ) -> List[Any]:
    # load dataset
    dataset = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=True )
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id )
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column('audio' , Audio(sampling_rate=sampling_rate ) )
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device )
    # map function to decode audio
    def map_to_pred(batch ):
        prediction = asr(
            batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
        batch['prediction'] = prediction['text']
        batch['target'] = normalize_text(batch['sentence'] )
        return batch
    # run inference on all examples
    result = dataset.map(map_to_pred , remove_columns=dataset.column_names )
    # compute and log_results
    # do not change function below
    log_results(result , args )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
lowerCamelCase__ = parser.parse_args()
main(args)
from __future__ import annotations
import math
import random
from typing import Any
class UpperCamelCase__ :
'''simple docstring'''
    def __init__( self : str ) -> None:
        '''simple docstring'''
        self.data = []
        self.head = 0
        self.tail = 0
    def is_empty( self : Dict ) -> bool:
        '''simple docstring'''
        return self.head == self.tail
    def push( self : str ,data : Any ) -> None:
        '''simple docstring'''
        self.data.append(data )
        self.tail = self.tail + 1
    def pop( self : Optional[int] ) -> Any:
        '''simple docstring'''
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret
    def count( self : Optional[Any] ) -> int:
        '''simple docstring'''
        return self.tail - self.head
    def print_queue( self : str ) -> None:
        '''simple docstring'''
        print(self.data )
        print("""**************""" )
        print(self.data[self.head : self.tail] )
class UpperCamelCase__ :
'''simple docstring'''
    def __init__( self : List[str] ,data : Any ) -> None:
        '''simple docstring'''
        self.data = data
        self.left = None
        self.right = None
        self.height = 1
    def get_data( self : int ) -> Any:
        '''simple docstring'''
        return self.data
    def get_left( self : Any ) -> MyNode | None:
        '''simple docstring'''
        return self.left
    def get_right( self : str ) -> MyNode | None:
        '''simple docstring'''
        return self.right
    def get_height( self : str ) -> int:
        '''simple docstring'''
        return self.height
    def set_data( self : Any ,data : Any ) -> None:
        '''simple docstring'''
        self.data = data
    def set_left( self : Dict ,node : MyNode | None ) -> None:
        '''simple docstring'''
        self.left = node
    def set_right( self : int ,node : MyNode | None ) -> None:
        '''simple docstring'''
        self.right = node
    def set_height( self : Dict ,height : int ) -> None:
        '''simple docstring'''
        self.height = height
def get_height( node ) -> int:
    '''simple docstring'''
    if node is None:
        return 0
    return node.get_height()
def my_max( a , b ) -> int:
    '''simple docstring'''
    if a > b:
        return a
    return b
def right_rotation( node ) -> MyNode:
    '''simple docstring'''
    print("""left rotation node:""" , node.get_data() )
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right() )
    ret.set_right(node )
    h1 = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
    node.set_height(h1 )
    h2 = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
    ret.set_height(h2 )
    return ret
def left_rotation( node ) -> MyNode:
    '''simple docstring'''
    print("""right rotation node:""" , node.get_data() )
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left() )
    ret.set_left(node )
    h1 = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
    node.set_height(h1 )
    h2 = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
    ret.set_height(h2 )
    return ret
def lr_rotation( node ) -> MyNode:
    '''simple docstring'''
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child ) )
    return right_rotation(node )
def rl_rotation( node ) -> MyNode:
    '''simple docstring'''
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child ) )
    return left_rotation(node )
def insert_node( node , data ) -> MyNode | None:
    '''simple docstring'''
    if node is None:
        return MyNode(data )
    if data < node.get_data():
        node.set_left(insert_node(node.get_left() , data ) )
        if (
            get_height(node.get_left() ) - get_height(node.get_right() ) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node )
            else:
                node = lr_rotation(node )
    else:
        node.set_right(insert_node(node.get_right() , data ) )
        if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node )
            else:
                node = left_rotation(node )
    h = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
    node.set_height(h )
    return node
def get_right_most( root ) -> Any:
    '''simple docstring'''
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()
def get_left_most( root ) -> Any:
    '''simple docstring'''
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node( root , data ) -> MyNode | None:
    '''simple docstring'''
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child )
            root.set_data(temp_data )
            root.set_right(del_node(right_child , temp_data ) )
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("""No such data""" )
            return root
        else:
            root.set_left(del_node(left_child , data ) )
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child , data ) )
    if get_height(right_child ) - get_height(left_child ) == 2:
        assert right_child is not None
        if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
            root = left_rotation(root )
        else:
            root = rl_rotation(root )
    elif get_height(right_child ) - get_height(left_child ) == -2:
        assert left_child is not None
        if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
            root = right_rotation(root )
        else:
            root = lr_rotation(root )
    height = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
    root.set_height(height )
    return root
class AVLtree :
    '''simple docstring'''
    def __init__( self : Tuple ) -> None:
        '''simple docstring'''
        self.root = None
    def get_height( self : Optional[Any] ) -> int:
        '''simple docstring'''
        return get_height(self.root )
    def insert( self : List[Any] ,data : Any ) -> None:
        '''simple docstring'''
        print("""insert:""" + str(data ) )
        self.root = insert_node(self.root ,data )
    def del_node( self : Optional[int] ,data : Any ) -> None:
        '''simple docstring'''
        print("""delete:""" + str(data ) )
        if self.root is None:
            print("""Tree is empty!""" )
            return
        self.root = del_node(self.root ,data )
    def __str__( self : str ,) -> str:  # a level traversal, gives a more intuitive look on the tree
        '''simple docstring'''
        output = """"""
        q = MyQueue()
        q.push(self.root )
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = """ """ * int(math.pow(2 ,layer - 1 ) )
            output += space
            if node is None:
                output += "*"
                q.push(None )
                q.push(None )
            else:
                output += str(node.get_data() )
                q.push(node.get_left() )
                q.push(node.get_right() )
            output += space
            cnt = cnt + 1
            for i in range(100 ):
                if cnt == math.pow(2 ,layer ) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def _test( ) -> None:
'''simple docstring'''
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
    t = AVLtree()
    lst = list(range(1_0))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester ( unittest.TestCase ):
'''simple docstring'''
    def __init__( self : Optional[int] ,parent : List[str] ,batch_size : Optional[Any]=3 ,image_size : List[str]=32 ,num_channels : List[Any]=3 ,embeddings_size : str=10 ,hidden_sizes : Any=[10, 20, 30, 40] ,depths : Optional[Any]=[1, 1, 2, 1] ,is_training : Union[str, Any]=True ,use_labels : int=True ,hidden_act : Tuple="relu" ,num_labels : Dict=3 ,scope : Optional[int]=None ,) -> Optional[int]:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes )
    def prepare_config_and_inputs( self : Tuple ) -> Tuple:
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = self.get_config()
        return config, pixel_values
    def get_config( self : str ) -> Optional[int]:
        '''simple docstring'''
        return RegNetConfig(
            num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
    def create_and_check_model( self : Union[str, Any] ,config : int ,pixel_values : Union[str, Any] ) -> Any:
        '''simple docstring'''
        model = FlaxRegNetModel(config=config )
        result = model(pixel_values )
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
    def create_and_check_for_image_classification( self : Union[str, Any] ,config : Optional[int] ,pixel_values : Optional[Any] ) -> Tuple:
        '''simple docstring'''
        model = FlaxRegNetForImageClassification(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self : str ) -> int:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
__snake_case : Tuple = False
__snake_case : int = False
__snake_case : Tuple = False
    def setUp( self : Union[str, Any] ) -> None:
        '''simple docstring'''
        self.model_tester = FlaxRegNetModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=RegNetConfig ,has_text_modality=False )
    def test_config( self : Any ) -> Optional[int]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self : int ) -> List[str]:
        '''simple docstring'''
        return
    def test_model( self : Tuple ) -> List[Any]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification( self : Optional[int] ) -> int:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
    def test_inputs_embeds( self : str ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any:
'''simple docstring'''
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            # one hidden state per stage, plus the initial embedding output
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also works using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 193
| 1
|
"""simple docstring"""
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
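    # Small self-checks of the factorisation loop above: 13195 = 5 * 7 * 13 * 29,
    # so its largest prime factor is 29, and a prime input is its own answer.
    assert solution(13195) == 29
    assert solution(17) == 17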
| 45
|
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code snippet's tokens."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet on non-alphanumeric characters."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dicts
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Compute the MinHash of every file, then group near-duplicates via an LSH index."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    """Jaccard similarity of the token sets of two code snippets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
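
# Worked example: "def foo(): pass" and "def bar(): pass" tokenize to
# {"def", "foo", "pass"} and {"def", "bar", "pass"}; they share 2 of 4
# distinct tokens, so jaccard_similarity returns 0.5.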
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Reduce a cluster to its "extremes": files that are not close duplicates of one another."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
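
# Minimal end-to-end sketch (column names follow the codeparrot convention the
# functions above assume; the toy values are made up):
#
#   from datasets import Dataset
#   toy = Dataset.from_dict(
#       {
#           "content": ["def f(): return 1", "def f(): return 1", "print('x')"],
#           "repo_name": ["r1", "r2", "r3"],
#           "path": ["a.py", "b.py", "c.py"],
#       }
#   )
#   deduped, clusters = deduplicate_dataset(toy, jaccard_threshold=0.85)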
| 45
| 1
|
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 81
|
def triangle_number_generator():
    """Generate the triangle number series."""
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n):
    """Count the divisors of n via its prime factorization."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution():
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
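    # Worked check of count_divisors: 28 = 2**2 * 7, so it has
    # (2 + 1) * (1 + 1) = 6 divisors: 1, 2, 4, 7, 14, 28.
    assert count_divisors(28) == 6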
| 81
| 1
|
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
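
# A minimal round-trip sketch outside pytest (the db path is illustrative):
#
#   from datasets import Dataset
#   Dataset.from_dict({"col_1": ["a", "b"]}).to_sql("dataset", "sqlite:///my.db")
#   ds = Dataset.from_sql("dataset", "sqlite:///my.db")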
| 210
|
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(
description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default="tf-tpu",
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn
def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
__lowercase = os.path.join(args.output_dir , args.split )
if not os.path.exists(lowercase ):
os.makedirs(lowercase )
else:
__lowercase = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)
    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
__a : Optional[Any] = parse_args()
main(args)
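
# Sketch of reading one shard back with tf.data (feature names match the
# writer above; the filename is illustrative):
#
#   raw = tf.data.TFRecordDataset("tf-tpu/train/dataset-0-1000.tfrecord")
#   desc = {
#       "input_ids": tf.io.VarLenFeature(tf.int64),
#       "attention_mask": tf.io.VarLenFeature(tf.int64),
#   }
#   parsed = raw.map(lambda rec: tf.io.parse_single_example(rec, desc))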
| 210
| 1
|
def simplify(current_set: list[list]) -> list[list]:
    # Divide each row by the magnitude of its first term, creating unit values.
    duplicated_set = current_set.copy()
    for row_index, row in enumerate(duplicated_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, final_set[0])
        final_set = resultant
    return final_set
def solve_simultaneous(equations: list[list]) -> list:
    """
    Solve a system of n linear equations, each given as a list of n coefficients
    followed by the constant term.
    """
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    data_set = data_set.copy()
    simplified = simplify(data_set)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase : List[Any] =[
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
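    # Worked example: [[1, 2, 3], [4, 5, 6]] encodes x + 2y = 3 and
    # 4x + 5y = 6, whose solution is x = -1, y = 2:
    print(solve_simultaneous([[1, 2, 3], [4, 5, 6]]))  # expected [-1.0, 2.0]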
| 266
|
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields_ is a specification for ctypes of the Windows CONSOLE_CURSOR_INFO structure
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()
def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()
@contextmanager
def hide():
    """Context manager to hide the terminal cursor."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
| 266
| 1
|
def bead_sort(sequence: list) -> list:
    """Bead sort ("gravity sort") for a list of non-negative integers."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
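    # Repeated values are handled too: the "beads" simply settle level,
    # and len(sequence) passes are enough for every bead to fall into place.
    assert bead_sort([5, 3, 3, 1]) == [1, 3, 3, 5]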
| 121
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Save the model with the best metric value seen so far."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
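
# Hedged wiring sketch (any other Trainer arguments are whatever your training
# setup already uses; "ckpts" and "rouge2" are illustrative values):
#
#   trainer = pl.Trainer(
#       callbacks=[
#           get_checkpoint_callback(output_dir="ckpts", metric="rouge2"),
#           get_early_stopping_callback(metric="rouge2", patience=3),
#       ],
#   )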
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)
    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})
    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 121
| 1
|
"""simple docstring"""
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
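
# Example of the two heuristics for a displacement of (dx, dy) = (3, 4):
# HEURISTIC == 1 (Manhattan) gives |3| + |4| = 7, while the Euclidean branch
# gives sqrt(3**2 + 4**2) = 5.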
class AStar:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]
    def get_successors(self, parent: Node) -> list[Node]:
        """Returns a list of successors (both in the grid and free spaces)."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        """Retrace the path from parents to parents until start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    def __init__(self, start, goal):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            # each direction aims at the other direction's frontier node
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]
    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
_a = (0, 0)
_a = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_a = time.time()
_a = AStar(init, goal)
_a = a_star.search()
_a = time.time() - start_time
print(f"""AStar execution time = {end_time:f} seconds""")
_a = time.time()
_a = BidirectionalAStar(init, goal)
_a = time.time() - bd_start_time
print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 23
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)
    def pad(
        self,
        processed_features,
        padding=True,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
    ):
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]

        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)
    def _pad(
        self,
        processed_features,
        max_length=None,
        padding_strategy=PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of=None,
        return_attention_mask=None,
    ):
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features
    def _truncate(
        self,
        processed_features,
        max_length=None,
        pad_to_multiple_of=None,
        truncation=None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features
    def _get_padding_strategies(self, padding=False, max_length=None):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
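
# Hedged usage sketch: a toy subclass with the default "input_values" input
# name, padding two variable-length sequences to equal length (the class and
# values below are this sketch's own, not part of the file above):
#
#   class ToyFeatureExtractor(SequenceFeatureExtractor):
#       model_input_names = ["input_values"]
#
#   fe = ToyFeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
#   batch = fe.pad({"input_values": [[0.1, 0.2, 0.3], [0.4]]}, padding=True, return_tensors="np")
#   # batch["input_values"].shape == (2, 3); batch["attention_mask"] marks the real frames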
| 23
| 1
|
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = '''
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
'''.split()
        self.run_and_check(train_args)

        eval_args = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
        self.run_and_check(eval_args)

        entropy_eval_args = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
        self.run_and_check(entropy_eval_args)
| 193
|
import numpy as np
def tangent_hyperbolic(vector: np.array) -> np.array:
    """Implement the tanh function as (2 / (1 + e^(-2x))) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
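
    # The closed form above equals np.tanh exactly, so a spot check:
    assert np.allclose(tangent_hyperbolic(np.array([0.0, 1.0, -1.0])), np.tanh([0.0, 1.0, -1.0]))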
| 193
| 1
|
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img
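
# For a single BGR pixel [10, 20, 30], the negative computed above is
# [245, 235, 225]: 255 minus each channel value.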
if __name__ == "__main__":
# read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    img = convert_to_negative(img)
# show result image
imshow('''negative of original image''', img)
waitKey(0)
destroyAllWindows()
| 364
|
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'''The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'''
)
lowerCAmelCase_ = None
INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)


def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
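
# Worked example for compute_intermediate_size: with n = 4096 (the 7B hidden
# size), int(8 * 4096 / 3) = 10922, and rounding up to a multiple of 256 gives
# 43 * 256 = 11008, which matches the "7B" entry (11008) in the size table
# near the top of this script.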
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))

    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wq.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wk.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
                f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
                f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
                f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
                f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[
                    f"layers.{layer_i}.ffn_norm.weight"
                ],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)

            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )

        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))
    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        state_dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
            ),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }

    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))
    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim,
        intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
        num_attention_heads=params["n_heads"],
        num_hidden_layers=params["n_layers"],
        rms_norm_eps=params["norm_eps"],
        num_key_value_heads=num_key_value_heads,
    )
    config.save_pretrained(tmp_model_path)

    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()

    print("Loading the checkpoint in a Llama model.")
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path

    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
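# Example invocation (sketch; the paths below are placeholders):
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/downloaded/llama --model_size 7B \
#       --output_dir /path/to/llama-7b-hf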
| 279
| 0
|
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class __A :
"""simple docstring"""
pass
| 81
|
"""simple docstring"""
def triangle_number_generator():
    """Yield the triangle numbers 1, 3, 6, 10, ..."""
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n):
    """Count the divisors of n from its prime factorisation."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution():
    """Return the first triangle number with more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)
if __name__ == "__main__":
print(solution())
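    # Added sanity check (illustrative): 28 = 1 + 2 + ... + 7 is the first
    # triangle number with more than five divisors (1, 2, 4, 7, 14, 28).
    assert count_divisors(28) == 6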
| 81
| 1
|
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_resize=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        num_labels=10,
        do_reduce_labels=False,
        ignore_index=255,
        repo_path="shi-labs/oneformer_demo",
        class_info_file="ade20k_panoptic.json",
        num_text=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4

        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index

    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width

    def get_fake_oneformer_outputs(self):
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class

    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "ignore_index"))
        self.assertTrue(hasattr(image_processor, "class_info_file"))
        self.assertTrue(hasattr(image_processor, "num_text"))
        self.assertTrue(hasattr(image_processor, "repo_path"))
        self.assertTrue(hasattr(image_processor, "metadata"))
        self.assertTrue(hasattr(image_processor, "do_reduce_labels"))
    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def comm_get_image_processor_inputs(
        self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"
    ):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]

        inputs = image_processor(
            image_inputs,
            ["semantic"] * len(image_inputs),
            annotations,
            return_tensors="pt",
            instance_id_to_semantic_id=instance_id_to_semantic_id,
            pad_and_return_pixel_mask=True,
        )

        return inputs

    def test_init_without_params(self):
        pass
    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type
            )

            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]

            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)

        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type="pil")
        common(is_instance_map=True, segmentation_type="pil")
    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1

        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)
    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs)

        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )

        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)

        self.assertEqual(segmentation[0].shape, target_sizes[0])

    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )

    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
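
# Added illustration (sketch): `binary_mask_to_rle` returns a flat list of
# alternating (1-indexed run start, run length) values for the runs of ones in
# the flattened mask, which is what the expected values 21 and 45 above encode.
# Assuming torch and vision are available, it can be exercised directly:
if __name__ == "__main__":
    demo_mask = np.zeros((20, 50))
    demo_mask[0, 20:] = 1  # one run of 30 ones starting at flat position 21
    print(binary_mask_to_rle(demo_mask))  # -> [21, 30]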
| 90
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 90
| 1
|
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # can't train a new tokenizer via the Tokenizers lib
    def test_training_new_tokenizer(self):
        pass

    # can't train a new tokenizer via the Tokenizers lib
    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
| 266
|
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(
    file_path: str = "digital_image_processing/image_data/lena_small.jpg",
):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(
        image, x_coordinate, y_coordinate, center
    )

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
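
# These checks are plain pytest functions; assuming the repository layout used
# in the constants above, they can be run from the project root with:
#   python -m pytest digital_image_processing/test_digital_image_processing.py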
| 266
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_poolformer''': [
'''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''PoolFormerConfig''',
'''PoolFormerOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
'''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PoolFormerForImageClassification''',
'''PoolFormerModel''',
'''PoolFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
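
# With the lazy structure above, `from transformers.models.poolformer import
# PoolFormerModel` defers the torch-dependent submodule import until the
# attribute is first accessed; only `_import_structure` is built at import time.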
| 360
|
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )

    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
| 228
| 0
|
'''simple docstring'''
from math import factorial
DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            "Parameters chain_length and number_limit must be greater than 0"
        )

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{solution()}")
| 23
|
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
UpperCamelCase__: Tuple = "0.12" # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")


def check_models_equal(model_1, model_2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
| 23
| 1
|
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"

        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()

        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()

        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)

            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)

            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"

        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
| 39
|
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'The `image_to_image.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionImg2ImgPipeline` instead.'
)
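
# Migration sketch (the checkpoint id below is only an example):
#   from diffusers import StableDiffusionImg2ImgPipeline
#   pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")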
| 39
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""deepmind/language-perceiver""": """https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json""",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union[PreTrainedTokenizerBase, FeatureExtractionMixin],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
| 323
|
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    """simple docstring"""
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    """simple docstring"""
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            "Parameters chain_length and number_limit must be greater than 0"
        )

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{solution()}''')
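    # Added sanity check (illustrative): 145 is a factorion, so its chain
    # never leaves 145 (1! + 4! + 5! = 145).
    assert digit_factorial_sum(145) == 145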
| 279
| 0
|
import torch
from torch import nn
class __lowerCamelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict=1 , SCREAMING_SNAKE_CASE__ : Dict=False ) -> int:
super().__init__()
lowerCAmelCase__ = n_token
lowerCAmelCase__ = d_embed
lowerCAmelCase__ = d_proj
lowerCAmelCase__ = cutoffs + [n_token]
lowerCAmelCase__ = [0] + self.cutoffs
lowerCAmelCase__ = div_val
lowerCAmelCase__ = self.cutoffs[0]
lowerCAmelCase__ = len(self.cutoffs ) - 1
lowerCAmelCase__ = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
lowerCAmelCase__ = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
lowerCAmelCase__ = nn.Parameter(torch.zeros(self.n_clusters ) )
lowerCAmelCase__ = nn.ModuleList()
lowerCAmelCase__ = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(__lowerCamelCase , __lowerCamelCase ) ) )
else:
self.out_projs.append(__lowerCamelCase )
self.out_layers.append(nn.Linear(__lowerCamelCase , __lowerCamelCase ) )
else:
for i in range(len(self.cutoffs ) ):
lowerCAmelCase__ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(__lowerCamelCase , __lowerCamelCase ) ) )
self.out_layers.append(nn.Linear(__lowerCamelCase , r_idx - l_idx ) )
lowerCAmelCase__ = keep_order
def a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int ) -> Union[str, Any]:
if proj is None:
lowerCAmelCase__ = nn.functional.linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
lowerCAmelCase__ = nn.functional.linear(__lowerCamelCase , proj.t().contiguous() )
lowerCAmelCase__ = nn.functional.linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def a ( self : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : int=False ) -> int:
if labels is not None:
# Shift so that tokens < n predict n
lowerCAmelCase__ = hidden[..., :-1, :].contiguous()
lowerCAmelCase__ = labels[..., 1:].contiguous()
lowerCAmelCase__ = hidden.view(-1 , hidden.size(-1 ) )
lowerCAmelCase__ = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError("Input and labels should have the same size in the batch dimension." )
else:
lowerCAmelCase__ = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
lowerCAmelCase__ = self._compute_logit(__lowerCamelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
lowerCAmelCase__ = labels != -100
lowerCAmelCase__ = torch.zeros_like(__lowerCamelCase , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase__ = (
-nn.functional.log_softmax(__lowerCamelCase , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
lowerCAmelCase__ = nn.functional.log_softmax(__lowerCamelCase , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase__ = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase__ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase__ = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase__ = self.out_layers[i].weight
lowerCAmelCase__ = self.out_layers[i].bias
if i == 0:
lowerCAmelCase__ = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase__ = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(__lowerCamelCase )
biases.append(__lowerCamelCase )
lowerCAmelCase__ = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase__ = self._compute_logit(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
lowerCAmelCase__ = nn.functional.log_softmax(__lowerCamelCase , dim=1 )
if labels is None:
lowerCAmelCase__ = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
lowerCAmelCase__ = torch.zeros_like(__lowerCamelCase , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase__ = 0
lowerCAmelCase__ = [0] + self.cutoffs
for i in range(len(__lowerCamelCase ) - 1 ):
lowerCAmelCase__ = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
lowerCAmelCase__ = (labels >= l_idx) & (labels < r_idx)
lowerCAmelCase__ = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
lowerCAmelCase__ = labels.index_select(0 , __lowerCamelCase ) - l_idx
lowerCAmelCase__ = head_logprob.index_select(0 , __lowerCamelCase )
lowerCAmelCase__ = hidden.index_select(0 , __lowerCamelCase )
else:
lowerCAmelCase__ = hidden
if i == 0:
if labels is not None:
lowerCAmelCase__ = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase__ = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase__ = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase__ = self._compute_logit(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
lowerCAmelCase__ = nn.functional.log_softmax(__lowerCamelCase , dim=1 )
lowerCAmelCase__ = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
lowerCAmelCase__ = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase__ = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
lowerCAmelCase__ = logprob_i
if labels is not None:
if (hasattr(self , "keep_order" ) and self.keep_order) or keep_order:
out.index_copy_(0 , __lowerCamelCase , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    logprob_i = head_logprob[:, -i, None] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
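# ---------------------------------------------------------------------------
# Illustration (added; not part of the original module): the adaptive softmax
# above factorizes the log-probability of a rare "tail" token as
#     log p(token) = log p(tail cluster | hidden) + log p(token | cluster, hidden)
# The self-contained sketch below reproduces that factorization with plain
# tensors; all sizes are made up for the example.
import torch
import torch.nn.functional as F


def _adaptive_softmax_sketch(batch=2, d_model=8, head_vocab=6, tail_vocab=10):
    torch.manual_seed(0)
    hidden = torch.randn(batch, d_model)
    head_weight = torch.randn(head_vocab + 1, d_model)  # +1 logit slot for the tail cluster
    tail_weight = torch.randn(tail_vocab, d_model)
    head_logprob = F.log_softmax(hidden @ head_weight.t(), dim=-1)
    tail_logprob = F.log_softmax(hidden @ tail_weight.t(), dim=-1)
    # Log-probability of every tail token: cluster log-prob + within-cluster log-prob.
    return head_logprob[:, -1, None] + tail_logprob  # shape (batch, tail_vocab)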
import fire

from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized seq2seq model (and the matching tokenizer) built from `config_name`."""
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
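# Usage sketch (added; the model name and output directory are illustrative):
#
#   python save_randomly_initialized_model.py t5-small ./t5-small-random
#
# Any extra `--key=value` flags are forwarded to `AutoConfig.from_pretrained`,
# which makes it easy to shrink hidden sizes when building tiny test models.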
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    UNet3DConditionModel,
    VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_guided_video_to_video(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")

        prompt = "Spiderman is surfing"

        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames

        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
import unittest

import numpy as np

from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
    expand_dims,
    flatten_dict,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    reshape,
    squeeze,
    transpose,
)


if is_flax_available():
    import jax.numpy as jnp

if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
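# Quick reference (added): the helpers exercised above dispatch on the input's
# framework, so one call works for NumPy, PyTorch, TensorFlow, and JAX inputs:
#
#   from transformers.utils import transpose
#   import numpy as np
#   x = np.random.randn(3, 4)
#   transpose(x).shape  # (4, 3); the same call accepts torch/tf/jax tensors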
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import pyarrow as pa


if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Translation:
    """Feature for translations with a fixed set of languages per example."""

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    """Feature for translations with a variable set of languages per example."""

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
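# Example (added for illustration): encoding a variable-language example
# flattens multiple translations per language and sorts by (language, text):
#
#   TranslationVariableLanguages(languages=["en", "fr", "de"]).encode_example(
#       {"en": "the cat", "fr": ["le chat", "la chatte"], "de": "die katze"}
#   )
#   # {"language": ("de", "en", "fr", "fr"),
#   #  "translation": ("die katze", "the cat", "la chatte", "le chat")}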
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name):
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_logits, expected_pred_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_pred_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_pred_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_pred_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_pred_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_pred_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_pred_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--yolos_name",
        default="yolos_s_200_pre",
        type=str,
        help=(
            "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
            " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
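# Example invocation (added; paths are illustrative):
#
#   python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
#       --checkpoint_path /path/to/yolos_s_200_pre.pth \
#       --pytorch_dump_folder_path ./yolos-small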
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)


pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type

from .. import config
from ..utils import logging
from .formatting import (
    ArrowFormatter,
    CustomFormatter,
    Formatter,
    PandasFormatter,
    PythonFormatter,
    TensorFormatter,
    format_table,
    query_table,
)
from .np_formatter import NumpyFormatter


logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a Formatter object under a format type name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Register an error to raise when an unavailable format type (or one of its aliases) is requested."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error


# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])


def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type name is an alias, return its canonical name; otherwise return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Factory function to get a Formatter given its type name and keyword arguments."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
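# Minimal usage sketch (added): `get_formatter` resolves aliases first, so the
# following two calls return the same formatter type:
#
#   get_formatter("np")      # alias
#   get_formatter("numpy")   # canonical name
#
# Requesting an unavailable backend raises the ValueError registered above
# instead of an opaque ImportError at formatting time.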
"""simple docstring"""
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> int:
while a != 0:
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = b % a, a
return b
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> int:
if gcd(__UpperCAmelCase , __UpperCAmelCase ) != 1:
lowerCAmelCase__ : Optional[Any] = f"""mod inverse of {a!r} and {m!r} does not exist"""
raise ValueError(__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = 1, 0, a
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = 0, 1, m
while va != 0:
lowerCAmelCase__ : Optional[Any] = ua // va
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Dict = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
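if __name__ == "__main__":
    # Worked example (added): 3 * 5 == 15 == 2 * 7 + 1, so 5 is the inverse of 3 mod 7.
    print(find_mod_inverse(3, 7))  # 5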
"""simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
_A = """base_with_context"""
def load_notes_encoder(weights, model):
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))

    return model
def load_continuous_encoder(weights, model):
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))

    return model
def load_decoder(weights, model):
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T)
    )

    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"])
        )
        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T)
        )

        attention_weights = ly_weight["self_attention"]
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"])
        )

        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T)
        )
        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))
    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))

    return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jax.tree_util.tree_map(onp.array, t5_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"],
        vocab_size=synth_model.model.module.config.vocab_size,
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_context_length=synth_model.sequence_length["targets_context"],
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_length=synth_model.sequence_length["targets_context"],
        max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time,
        d_model=synth_model.model.module.config.emb_dim,
        num_layers=synth_model.model.module.config.num_decoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder,
        continuous_encoder=continuous_encoder,
        decoder=decoder,
        scheduler=scheduler,
        melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument(
        "--checkpoint_path",
        default=f"{MODEL}/checkpoint_500000",
        type=str,
        required=False,
        help="Path to the original jax model checkpoint.",
    )
    args = parser.parse_args()

    main(args)
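# Example invocation (added; the checkpoint layout is illustrative):
#
#   python convert_music_spectrogram_to_diffusers.py \
#       --checkpoint_path base_with_context/checkpoint_500000 \
#       --output_path ./spectrogram-diffusion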
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term to contain `n` digits."""
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
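# Sanity check (added): the first Fibonacci number with 3 digits is F(12) = 144,
# so `solution(3)` returns 12.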
import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger


logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    """`Enum` that specifies which verification checks to run."""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None) -> None:
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file is missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict) -> None:
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size: Optional[int]) -> bool:
    """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
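# Usage sketch (added; the path and byte count are illustrative):
#
#   get_size_checksum_dict("/tmp/data.csv")
#   # {"num_bytes": 1024, "checksum": "<64-char sha256 hex digest>"}
#
# Passing `record_checksum=False` skips hashing and records `None` instead,
# which is useful for very large files.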
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class MobileNetV1ImageProcessor(BaseImageProcessor):
    r"""
    Constructs an image processor with the standard resize / center-crop / rescale / normalize pipeline.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image so that its shortest edge matches `size["shortest_edge"]`."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center crop an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale an image by a scale factor, e.g. `1 / 255`."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize an image with the given mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess an image or a batch of images for the model."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
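# Usage sketch (added; the class name above is a best-effort reconstruction and
# the array contents are illustrative):
#
#   import numpy as np
#   processor = MobileNetV1ImageProcessor()
#   batch = processor(images=np.zeros((300, 400, 3), dtype=np.uint8), return_tensors="np")
#   batch["pixel_values"].shape  # (1, 3, 224, 224): short side resized to 256, then center-cropped to 224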
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__lowercase: Any = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : float , _UpperCamelCase : int = 1_60_00 ) -> str:
'''simple docstring'''
UpperCamelCase__ = int(round(sample_rate * max_length ) )
if len(_UpperCamelCase ) <= sample_length:
return wav
UpperCamelCase__ = randint(0 , len(_UpperCamelCase ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder` "
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`. "
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(F'Training/evaluation parameters {training_args}' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--audio_column_name` to the correct audio column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--label_column_name` to the correct text column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )
    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        """Computes accuracy on a batch of predictions."""
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
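
# --- Example invocation (added sketch; dataset and model names are illustrative) ---
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb --dataset_config_name ks \
#       --output_dir ./wav2vec2-base-ft-keyword-spotting \
#       --do_train --do_eval \
#       --learning_rate 3e-5 --max_length_seconds 1 \
#       --per_device_train_batch_size 32 --num_train_epochs 5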
"""CodeGen model configuration."""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
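
# --- Illustrative usage (added sketch, not part of the original file) ---
# `CodeGenOnnxConfig` plugs into the standard `transformers.onnx` export flow;
# this only materializes the dummy inputs it would feed to the exporter. The
# checkpoint name is an example (downloaded on first use), and this module uses
# package-relative imports, so treat the guard as illustration only.
if __name__ == "__main__":
    from transformers import AutoConfig, AutoTokenizer

    cfg = AutoConfig.from_pretrained("Salesforce/codegen-350M-mono")
    onnx_config = CodeGenOnnxConfig(cfg, task="default", use_past=False)
    tok = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
    dummy = onnx_config.generate_dummy_inputs(tok, framework=TensorType.PYTORCH)
    print(list(dummy.keys()))  # ['input_ids', 'attention_mask']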
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper=True):
    """Extract a model from its distributed containers (DDP/DataParallel/DeepSpeed/compiled)."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model
def wait_for_everyone():
    """Introduces a blocking point in the script, making sure all processes have reached this point."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save the data to disk. Use in place of `torch.save()` so only the main process writes."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Temporarily add each keyword argument to `os.environ` (upper-cased), restoring on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def get_pretty_name(obj):
    """Gets a pretty name from an object (qualname, name, or str fallback)."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merges `source` into `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port=None):
    """Checks if a port is in use on `localhost`."""
    if port is None:
        port = 29_500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
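
# --- Illustrative usage (added sketch, not part of the original module) ---
# `patch_environment` upper-cases the keys and removes them again on exit.
# This module uses package-relative imports, so treat the guard as illustration.
if __name__ == "__main__":
    with patch_environment(master_addr="127.0.0.1", master_port="29501"):
        assert os.environ["MASTER_ADDR"] == "127.0.0.1"
    assert "MASTER_ADDR" not in os.environ  # removed on exit (assuming it was unset before)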
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    """Speech2Text feature extractor: log-mel filter banks plus utterance-level CMVN."""

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        """Extract log-mel filter bank features via TorchAudio's Kaldi-compliant `fbank`."""
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: bool = True,
        normalize_vars: bool = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        # subtract mean and divide by standard deviation, computed over valid frames only
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]
    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_tensors=None,
        sampling_rate=None,
        return_attention_mask=None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
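
# --- Illustrative usage (added sketch, not part of the original file) ---
# Feeding one second of noise at 16 kHz yields a (1, frames, num_mel_bins)
# log-mel feature tensor plus an attention mask; requires torchaudio. This
# module uses package-relative imports, so treat the guard as illustration.
if __name__ == "__main__":
    extractor = Speech2TextFeatureExtractor()
    speech = np.random.randn(16_000).astype(np.float32)
    out = extractor(speech, sampling_rate=16_000, return_tensors="np")
    print(out["input_features"].shape)  # roughly (1, ~100 frames, 80 mel bins)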
"""simple docstring"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)
class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)
    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a Mask2FormerConfig from a pre-trained backbone model configuration."""
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        """Serializes this instance to a Python dictionary, nesting the backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
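
# --- Illustrative usage (added sketch, not part of the original file) ---
# Instantiating the config with no arguments builds the default Swin backbone
# config locally (no download); `to_dict()` nests it for serialization. This
# module uses package-relative imports, so treat the guard as illustration.
if __name__ == "__main__":
    config = Mask2FormerConfig()
    as_dict = config.to_dict()
    print(as_dict["model_type"], as_dict["backbone_config"]["model_type"])  # mask2former swin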
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
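
# --- Note on the pattern above (added sketch) ---
# `_LazyModule` replaces this module in `sys.modules` so heavy backends
# (torch/tf/flax) are imported only when an attribute is first accessed, e.g.:
#
#     from transformers.models.clip import CLIPTextConfig   # cheap
#     from transformers.models.clip import CLIPModel        # triggers the torch import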
"""Configuration used when building the documentation notebooks."""

INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
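
# --- Illustrative usage (added sketch, not part of the original file) ---
# The special-token helpers implement the `<s> A </s></s> B </s>` pair format;
# the checkpoint name is an example and is downloaded on first use. This
# module uses package-relative imports, so treat the guard as illustration.
if __name__ == "__main__":
    tokenizer = CamembertTokenizerFast.from_pretrained("camembert-base")
    ids = tokenizer.build_inputs_with_special_tokens([10, 11], [20, 21])
    print(ids[0] == tokenizer.cls_token_id, ids.count(tokenizer.sep_token_id))  # True 3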
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)
class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
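
# --- Illustrative usage (added sketch, not part of the original file) ---
# The ONNX config only declares dynamic axes, so no weights are needed to
# inspect it. This module uses package-relative imports, so treat the guard
# as illustration (the same classes are importable from `transformers`).
if __name__ == "__main__":
    config = DistilBertConfig()
    onnx_config = DistilBertOnnxConfig(config)
    print(dict(onnx_config.inputs))
    # {'input_ids': {0: 'batch', 1: 'sequence'}, 'attention_mask': {0: 'batch', 1: 'sequence'}}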
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)


def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward

        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train

        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
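
# --- Illustrative usage (added sketch, not part of the original file) ---
# Benchmarks are normally driven through the public `transformers` API; the
# model name is an example and a config is downloaded on first use.
if __name__ == "__main__":
    from transformers import TensorFlowBenchmark as TFBenchmark
    from transformers import TensorFlowBenchmarkArguments

    benchmark_args = TensorFlowBenchmarkArguments(
        models=["distilbert-base-uncased"], batch_sizes=[1], sequence_lengths=[8], memory=False
    )
    print(TFBenchmark(benchmark_args).run())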
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """Helper function to read an audio file through ffmpeg."""
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """Helper function to read raw microphone data via ffmpeg."""
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """Helper function to read microphone audio as overlapping chunks."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """
    Reads raw bytes from an iterator and cuts them into chunks of length `chunk_len`, adding `stride` overlap on each
    side. `stream` makes partial results available before a full `chunk_len` has accumulated.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]

    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item


def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Internal function to create the generator of data through ffmpeg."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
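
# --- Quick sanity check (added sketch, not part of the original module) ---
# `chunk_bytes_iter` re-slices a byte stream into fixed windows with overlap;
# 10 bytes in 4-byte chunks with a (1, 1) stride steps forward 2 bytes at a time.
if __name__ == "__main__":
    chunks = list(chunk_bytes_iter(iter([b"0123456789"]), chunk_len=4, stride=(1, 1)))
    print([c["raw"] for c in chunks])  # [b'0123', b'2345', b'4567', b'6789', b'89']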
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    """simple docstring"""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
    def __call__(self, images=None, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, max_patches=2048, stride=0, pad_to_multiple_of=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_token_type_ids=False, return_length=False, verbose=True, return_tensors=None, **kwargs, ):
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs)
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs)

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
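# Hedged usage sketch (the checkpoint name is an assumption; any Pix2Struct
# checkpoint on the Hub should behave the same):
#   processor = Pix2StructProcessor.from_pretrained("google/pix2struct-base")
#   inputs = processor(images=image, text="A caption", return_tensors="pt")
#   # -> flattened patches from the image processor plus decoder_* ids from the tokenizer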
| 255
|
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class SCREAMING_SNAKE_CASE_ ( __a , unittest.TestCase ):
"""simple docstring"""
__lowercase : List[str] = CpmAntTokenizer
__lowercase : List[str] = False
def snake_case_ ( self):
super().setUp()
__SCREAMING_SNAKE_CASE = [
"""<d>""",
"""</d>""",
"""<s>""",
"""</s>""",
"""</_>""",
"""<unk>""",
"""<pad>""",
"""</n>""",
"""我""",
"""是""",
"""C""",
"""P""",
"""M""",
"""A""",
"""n""",
"""t""",
]
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))
@tooslow
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""")
__SCREAMING_SNAKE_CASE = """今天天气真好!"""
__SCREAMING_SNAKE_CASE = ["""今天""", """天气""", """真""", """好""", """!"""]
__SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = """今天天气真好!"""
__SCREAMING_SNAKE_CASE = [tokenizer.bos_token] + tokens
__SCREAMING_SNAKE_CASE = [6, 9_8_0_2, 1_4_9_6_2, 2_0_8_2, 8_3_1, 2_4_4]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.decode(lowerCAmelCase__)
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__)
| 255
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """simple docstring"""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    """simple docstring"""
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
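# Expected output for these scores: "Optimal value : 65"
# (depth-2 maxima are 90, 33, 65, 34423; depth-1 minima are 33 and 65; the root takes max(33, 65)).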
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 31
|
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
def __init__( self : Optional[int] , A : Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
super().__init__()
_UpperCAmelCase : Optional[int] = nn.ModuleList(A )
def _A ( self : Dict , A : torch.FloatTensor , A : Union[torch.Tensor, float, int] , A : torch.Tensor , A : List[torch.tensor] , A : List[float] , A : Optional[torch.Tensor] = None , A : Optional[torch.Tensor] = None , A : Optional[torch.Tensor] = None , A : Optional[Dict[str, Any]] = None , A : bool = False , A : bool = True , ):
for i, (image, scale, controlnet) in enumerate(zip(A , A , self.nets ) ):
_UpperCAmelCase , _UpperCAmelCase : str = controlnet(
A , A , A , A , A , A , A , A , A , A , A , )
# merge samples
if i == 0:
_UpperCAmelCase , _UpperCAmelCase : List[Any] = down_samples, mid_sample
else:
_UpperCAmelCase : Optional[int] = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(A , A )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def _A ( self : List[str] , A : Union[str, os.PathLike] , A : bool = True , A : Callable = None , A : bool = False , A : Optional[str] = None , ):
_UpperCAmelCase : str = 0
_UpperCAmelCase : str = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
A , is_main_process=A , save_function=A , safe_serialization=A , variant=A , )
idx += 1
_UpperCAmelCase : Tuple = model_path_to_save + F"""_{idx}"""
@classmethod
def _A ( cls : int , A : Optional[Union[str, os.PathLike]] , **A : Tuple ):
_UpperCAmelCase : str = 0
_UpperCAmelCase : int = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
_UpperCAmelCase : int = pretrained_model_path
while os.path.isdir(A ):
_UpperCAmelCase : List[str] = ControlNetModel.from_pretrained(A , **A )
controlnets.append(A )
idx += 1
_UpperCAmelCase : Dict = pretrained_model_path + F"""_{idx}"""
logger.info(F"""{len(A )} controlnets loaded from {pretrained_model_path}.""" )
if len(A ) == 0:
raise ValueError(
F"""No ControlNets found under {os.path.dirname(A )}. Expected at least {pretrained_model_path + '_0'}.""" )
return cls(A )
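# On-disk layout implied by the save/load pair above: the first controlnet is
# saved directly under `save_directory`, and each additional one under
# `save_directory_1`, `save_directory_2`, ..., which `from_pretrained` probes
# with `os.path.isdir` until a suffix directory is missing.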
| 31
| 1
|
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."
def lowercase ( _snake_case : Optional[int] , _snake_case : Tuple , _snake_case : Tuple ) ->Optional[Any]:
"""simple docstring"""
with open(_snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__snake_case : List[Any] = f.readlines()
# Find the start prompt.
__snake_case : Dict = 0
while not lines[start_index].startswith(_snake_case ):
start_index += 1
start_index += 1
__snake_case : Optional[int] = start_index
while not lines[end_index].startswith(_snake_case ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
SCREAMING_SNAKE_CASE : Optional[int] = """Model|Encoder|Decoder|ForConditionalGeneration"""
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """simple docstring"""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def _center_text(text, width):
    """simple docstring"""
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """simple docstring"""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    """simple docstring"""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"), start_prompt="<!--This table is updated automatically from the auto modules", end_prompt="<!-- End table-->", )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.")
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 361
|
"""simple docstring"""
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """simple docstring"""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid
def f(x: float) -> float:
    """simple docstring"""
    # f(x) = x^3 - 2x - 5 has a single real root near x ≈ 2.0946 inside [1, 1000]
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
| 24
| 0
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_bertweet': ['BertweetTokenizer']}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 235
|
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """simple docstring"""
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denomination (greedy: largest first)
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array

    return answer
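# Example: find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
# returns [500, 100, 100, 100, 100, 50, 20, 10, 5, 2].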
# Driver Code
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = []
UpperCAmelCase_ : Union[str, Any] = '0'
if (
input('Do you want to enter your denominations ? (yY/n): ').strip().lower()
== "y"
):
UpperCAmelCase_ : List[Any] = int(input('Enter the number of denominations you want to add: ').strip())
for i in range(0, n):
denominations.append(int(input(F'Denomination {i}: ').strip()))
UpperCAmelCase_ : str = input('Enter the change you want to make in Indian Currency: ').strip()
else:
# All denominations of Indian Currency if user does not enter
UpperCAmelCase_ : List[Any] = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
UpperCAmelCase_ : str = input('Enter the change you want to make: ').strip()
if int(value) == 0 or int(value) < 0:
print('The total value cannot be zero or negative.')
else:
print(F'Following is minimal change for {value}: ')
UpperCAmelCase_ : Optional[Any] = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=' ')
| 32
| 0
|
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # quiet TensorFlow logging before any import (assumed from the upstream env-dump script)
print('Python version:', sys.version)
print('OS platform:', platform.platform())
print('OS architecture:', platform.machine())
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
except ImportError:
print('Torch version:', None)
try:
import transformers
print('transformers version:', transformers.__version__)
except ImportError:
print('transformers version:', None)
| 243
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = '''Salesforce/blip-image-captioning-base'''
    description = (
        '''This is a tool that generates a description of an image. It takes an input named `image` which should be the '''
        '''image to caption, and returns a text that contains the description in English.'''
    )
    name = '''image_captioner'''
    model_class = AutoModelForVision2Seq

    inputs = ['''image''']
    outputs = ['''text''']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["""vision"""])
        super().__init__(*args, **kwargs)

    def encode(self, image):
        return self.pre_processor(images=image, return_tensors="""pt""")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
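# Minimal usage sketch (assumes the transformers agents runtime that ships PipelineTool):
#   tool = ImageCaptioningTool()
#   caption = tool(image)  # PIL image in, English description string out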
| 243
| 1
|
"""simple docstring"""
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
_lowerCAmelCase :Tuple = logging.getLogger(__name__)
_lowerCAmelCase :List[str] = 50 # max width of layer names
_lowerCAmelCase :int = 70 # max width of quantizer names
def add_arguments(parser):
    group = parser.add_argument_group('''quant_trainer arguments''')
    group.add_argument('''--wprec''', type=int, default=8, help='''weight precision''')
    group.add_argument('''--aprec''', type=int, default=8, help='''activation precision''')
    group.add_argument('''--quant-per-tensor''', action='''store_true''', help='''per tensor weight scaling''')
    group.add_argument('''--quant-disable''', action='''store_true''', help='''disable all quantizers''')
    group.add_argument('''--quant-disable-embeddings''', action='''store_true''', help='''disable all embeddings quantizers''')
    group.add_argument('''--quant-disable-keyword''', type=str, nargs='''+''', help='''disable quantizers by keyword''')
    group.add_argument('''--quant-disable-layer-module''', type=str, help='''disable quantizers by keyword under layer.''')
    group.add_argument('''--quant-enable-layer-module''', type=str, help='''enable quantizers by keyword under layer''')
    group.add_argument('''--calibrator''', default='''max''', help='''which quantization range calibrator to use''')
    group.add_argument('''--percentile''', default=None, type=float, help='''percentile for PercentileCalibrator''')
    group.add_argument('''--fuse-qkv''', action='''store_true''', help='''use the same scale factor for qkv''')
    group.add_argument('''--clip-gelu''', metavar='''N''', type=float, help='''clip gelu output maximum value to N''')
    group.add_argument(
        '''--recalibrate-weights''', action='''store_true''', help=(
            '''recalibrate weight amaxes by taking the max of the weights.'''
            ''' amaxes will be computed with the current quantization granularity (axis).'''
        ), )
def set_default_quantizers(args):
    if args.calibrator == "max":
        calib_method = '''max'''
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError('''Specify --percentile when using percentile calibrator''')
        calib_method = '''histogram'''
    elif args.calibrator == "mse":
        calib_method = '''histogram'''
    else:
        raise ValueError(F'Invalid calibrator {args.calibrator}')

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
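# Note on the weight descriptor above: axis=(0,) gives per-channel (per output
# row) amax ranges, while --quant-per-tensor collapses this to a single scale
# for the whole weight tensor.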
def lowerCamelCase_ (UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : Union[str, Any]=False ):
logger.info('''Configuring Model for Quantization''' )
logger.info(F'using quantization package {pytorch_quantization.__file__}' )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(UpperCamelCase__ , ['''embeddings'''] , which='''weight''' , _disabled=UpperCamelCase__ )
if args.quant_disable:
set_quantizer_by_name(UpperCamelCase__ , [''''''] , _disabled=UpperCamelCase__ )
if args.quant_disable_keyword:
set_quantizer_by_name(UpperCamelCase__ , args.quant_disable_keyword , _disabled=UpperCamelCase__ )
if args.quant_disable_layer_module:
set_quantizer_by_name(UpperCamelCase__ , [r'''layer.\d+.''' + args.quant_disable_layer_module] , _disabled=UpperCamelCase__ )
if args.quant_enable_layer_module:
set_quantizer_by_name(UpperCamelCase__ , [r'''layer.\d+.''' + args.quant_enable_layer_module] , _disabled=UpperCamelCase__ )
if args.recalibrate_weights:
recalibrate_weights(UpperCamelCase__ )
if args.fuse_qkv:
fuse_qkv(UpperCamelCase__ , UpperCamelCase__ )
if args.clip_gelu:
clip_gelu(UpperCamelCase__ , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(UpperCamelCase__ )
def lowerCamelCase_ (UpperCamelCase__ : str ):
logger.info('''Enabling Calibration''' )
for name, module in model.named_modules():
if name.endswith('''_quantizer''' ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F'{name:80}: {module}' )
def lowerCamelCase_ (UpperCamelCase__ : Tuple , UpperCamelCase__ : Any ):
logger.info('''Loading calibrated amax''' )
for name, module in model.named_modules():
if name.endswith('''_quantizer''' ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax('''percentile''' , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(UpperCamelCase__ )
def lowerCamelCase_ (UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict ):
def fusea(UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] ):
for mod in [qq, qk, qv]:
if not hasattr(UpperCamelCase__ , '''_amax''' ):
print(''' WARNING: NO AMAX BUFFER''' )
return
_UpperCAmelCase : Dict = qq._amax.detach().item()
_UpperCAmelCase : Tuple = qk._amax.detach().item()
_UpperCAmelCase : Optional[int] = qv._amax.detach().item()
_UpperCAmelCase : Any = max(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
qq._amax.fill_(UpperCamelCase__ )
qk._amax.fill_(UpperCamelCase__ )
qv._amax.fill_(UpperCamelCase__ )
logger.info(F' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}' )
for name, mod in model.named_modules():
if name.endswith('''.attention.self''' ):
logger.info(F'FUSE_QKV: {name:{name_width}}' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : Dict ):
for name, mod in model.named_modules():
if name.endswith('''.output.dense''' ) and not name.endswith('''attention.output.dense''' ):
_UpperCAmelCase : int = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=UpperCamelCase__ )
_UpperCAmelCase : str = mod._input_quantizer._amax.data.detach().item()
logger.info(F'CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}' )
def lowerCamelCase_ (UpperCamelCase__ : Optional[Any] ):
for name, mod in model.named_modules():
if hasattr(UpperCamelCase__ , '''_weight_quantizer''' ) and mod._weight_quantizer.axis is not None:
_UpperCAmelCase : List[Any] = mod.weight.shape[0]
_UpperCAmelCase : Dict = mod._weight_quantizer._amax.detach()
_UpperCAmelCase : str = torch.ones(UpperCamelCase__ , dtype=amax.dtype , device=amax.device ) * amax
print(F'expanding {name} {amax} -> {mod._weight_quantizer._amax}' )
def lowerCamelCase_ (UpperCamelCase__ : List[Any] ):
for name, mod in model.named_modules():
if hasattr(UpperCamelCase__ , '''_weight_quantizer''' ):
if not hasattr(mod.weight_quantizer , '''_amax''' ):
                print(F'RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER')
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
_UpperCAmelCase : Union[str, Any] = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
_UpperCAmelCase : Dict = set(range(len(mod.weight.size() ) ) ) - axis_set
_UpperCAmelCase : Optional[int] = pytorch_quantization.utils.reduce_amax(mod.weight , axis=UpperCamelCase__ , keepdims=UpperCamelCase__ ).detach()
logger.info(F'RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}' )
_UpperCAmelCase : Union[str, Any] = amax
def lowerCamelCase_ (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any=25 , UpperCamelCase__ : str=180 , UpperCamelCase__ : Union[str, Any]=None ):
if ignore is None:
_UpperCAmelCase : Optional[int] = []
elif not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
_UpperCAmelCase : Union[str, Any] = [ignore]
_UpperCAmelCase : List[Any] = 0
for name, mod in model.named_modules():
if not hasattr(UpperCamelCase__ , '''weight''' ):
continue
_UpperCAmelCase : Dict = max(UpperCamelCase__ , len(UpperCamelCase__ ) )
for name, mod in model.named_modules():
_UpperCAmelCase : Any = getattr(UpperCamelCase__ , '''_input_quantizer''' , UpperCamelCase__ )
_UpperCAmelCase : int = getattr(UpperCamelCase__ , '''_weight_quantizer''' , UpperCamelCase__ )
if not hasattr(UpperCamelCase__ , '''weight''' ):
continue
if type(UpperCamelCase__ ) in ignore:
continue
if [True for s in ignore if type(UpperCamelCase__ ) is str and s in name]:
continue
_UpperCAmelCase : str = F'Act:{input_q.extra_repr()}'
_UpperCAmelCase : Optional[int] = F'Wgt:{weight_q.extra_repr()}'
_UpperCAmelCase : List[Any] = F'{name:{name_width}} {act_str} {wgt_str}'
if len(UpperCamelCase__ ) <= line_width:
logger.info(UpperCamelCase__ )
else:
logger.info(F'{name:{name_width}} {act_str}' )
logger.info(F'{" ":{name_width}} {wgt_str}' )
def lowerCamelCase_ (UpperCamelCase__ : Dict ):
_UpperCAmelCase : str = 0
for name, mod in model.named_modules():
if isinstance(UpperCamelCase__ , pytorch_quantization.nn.TensorQuantizer ):
print(F'{name:80} {mod}' )
count += 1
print(F'{count} TensorQuantizers found in model' )
def lowerCamelCase_ (UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : str ):
_UpperCAmelCase : Tuple = getattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if quantizer_mod is not None:
assert hasattr(UpperCamelCase__ , UpperCamelCase__ )
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
logger.warning(F'{name} has no {quantizer}' )
def lowerCamelCase_ (UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : List[str]="both" , **UpperCamelCase__ : List[Any] ):
_UpperCAmelCase : List[str] = F'Warning: changing {which} quantizers of {name:{qname_width}}'
for k, v in kwargs.items():
s += F' {k}={v}'
if which in ["input", "both"]:
set_quantizer(UpperCamelCase__ , UpperCamelCase__ , '''_input_quantizer''' , UpperCamelCase__ , UpperCamelCase__ )
if which in ["weight", "both"]:
set_quantizer(UpperCamelCase__ , UpperCamelCase__ , '''_weight_quantizer''' , UpperCamelCase__ , UpperCamelCase__ )
logger.info(UpperCamelCase__ )
def lowerCamelCase_ (UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , **UpperCamelCase__ : Tuple ):
for name, mod in model.named_modules():
if hasattr(UpperCamelCase__ , '''_input_quantizer''' ) or hasattr(UpperCamelCase__ , '''_weight_quantizer''' ):
for n in names:
if re.search(UpperCamelCase__ , UpperCamelCase__ ):
set_quantizers(UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ )
elif name.endswith('''_quantizer''' ):
for n in names:
if re.search(UpperCamelCase__ , UpperCamelCase__ ):
_UpperCAmelCase : Optional[Any] = F'Warning: changing {name:{name_width}}'
for k, v in kwargs.items():
s += F' {k}={v}'
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
logger.info(UpperCamelCase__ )
| 263
|
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()
def _match(qs, ks):
    # Return True if the regexes in qs match any window of strings in tuple ks.
    qts = tuple((re.compile(x + '''$''') for x in qs))
    for i in range(len(ks) - len(qts) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False
def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P('''mp''', None)),
        (("transformer", "wte", "embedding"), P('''mp''', None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, '''mp''')),
        (("attention", "out_proj", "kernel"), P('''mp''', None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, '''mp''')),
        (("mlp", "c_fc", "bias"), P('''mp''')),
        (("mlp", "c_proj", "kernel"), P('''mp''', None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
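# Reading the specs above: P("mp", None) shards a weight's first axis across the
# "mp" mesh axis, P(None, "mp") shards the second axis, and a bare None leaves
# the parameter fully replicated on every device.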
def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
| 263
| 1
|
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
UpperCAmelCase__ : int =WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def handle_test_results(test_results):
    expressions = test_results.split(""" """)

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("""\n"""):
        if re.search(r"""_ \[doctest\]""", line):
            in_error = True
            file = line.split(""" """)[2]
        elif in_error and not line.split(""" """)[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(""",""")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
        self.thread_ts = None  # set by post(); post_reply() refuses to run before it

    @property
    def time(self):
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(""":""")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"""{int(hours )}h{int(minutes )}m{int(seconds )}s"""
@property
    def header(self):
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures(self):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"""🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.""",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
@property
    def failures(self):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"""There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"""
f""" {self.time}."""
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
@property
    def category_failures(self):
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"""*{category} failures*:""".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"""The following examples had failures:\n\n\n{report}\n""",
},
}
@property
    def payload(self):
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)
@staticmethod
    def error_out():
        payload = [
{
"type": "section",
"text": {
"type": "plain_text",
"text": "There was an issue running the tests.",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
]
print("""Sending the following payload""" )
print(json.dumps({"""blocks""": json.loads(lowerCAmelCase__ )} ) )
client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text="""There was an issue running the tests.""" , blocks=lowerCAmelCase__ , )
    def post(self):
        print("""Sending the following payload""")
        print(json.dumps({"""blocks""": json.loads(self.payload)}))

        text = f"""{self.n_failures} failures out of {self.n_tests} tests,""" if self.n_failures else "All tests passed."
        self.thread_ts = client.chat_postMessage(
            channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""], blocks=self.payload, text=text, )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"""*{key}*\n_{value}_\n\n"""

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("""Can only post reply if a post has been made.""")

        job_link = self.doc_test_results.pop("""job_link""")
        self.doc_test_results.pop("""failures""")
        self.doc_test_results.pop("""success""")
        self.doc_test_results.pop("""time_spent""")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["""failures"""]):
                text = f"""*Num failures* :{len(job_result["failed"] )} \n"""
                failures = job_result["""failures"""]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("""Sending the following reply""")
                print(json.dumps({"""blocks""": blocks}))

                client.chat_postMessage(
                    channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""], text=f"""Results for {job}""", blocks=blocks, thread_ts=self.thread_ts["""ts"""], )
time.sleep(1 )
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"""
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]})
        pages_to_iterate_over = math.ceil((result["""total_count"""] - 1_00) / 1_00)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + F"""&page={i + 2}""").json()
            jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]})

        return jobs
    except Exception as e:
        print("""Unknown error, could not fetch links.""", e)

    return {}
def retrieve_artifact(name):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="""utf-8""") as f:
                    _artifact[file.split(""".""")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(F"""Could not open {os.path.join(name, file )}.""") from e

    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"""name""": self.name, """path""": path})

    _available_artifacts = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get('''run_doctests''')

    artifact_path = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
    artifact = retrieve_artifact(artifact_path['''name'''])
if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact['''stats'''])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ''', '''

        all_failures = extract_first_line_failure(artifact['''failures_short'''])
        for line in artifact["summary_short"].split('''\n'''):
            if re.search('''FAILED''', line):
                line = line.replace('''FAILED ''', '''''')
                line = line.split()[0].replace('''\n''', '''''')

                if "::" in line:
                    file_path, test = line.split('''::''')
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else '''N/A'''
                        doc_test_results[category]["failures"][test] = failure

                        break
break
    message = Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply()
| 367
|
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def _lowercase ( ) -> str:
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request("""GET""" , """https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 )
@pytest.mark.integration
def _lowercase ( ) -> Union[str, Any]:
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" , """https://huggingface.co""" )
def _lowercase ( ) -> int:
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
http_head("""https://huggingface.co""" )
| 262
| 0
|
"""simple docstring"""
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    '''simple docstring'''
    if (force, area, distance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if force < 0:
        raise ValueError('Magnitude of force can not be negative')
    if distance < 0:
        raise ValueError('Distance can not be negative')
    if area < 0:
        raise ValueError('Area can not be negative')
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError('One and only one argument must be 0')
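# Worked example: casimir_force(force=0, area=4, distance=0.03) solves for the
# attractive force between two 4 m^2 plates 3 cm apart, roughly 6.4e-21 N.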
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 255
|
"""simple docstring"""
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    '''simple docstring'''
    # two-pointer scan; assumes nums is sorted in ascending order
    i = 0
    j = len(nums) - 1

    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1

    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{two_pointer([2, 7, 1_1, 1_5], 9) = }''')
| 255
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'''microsoft/focalnet-tiny''': '''https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json''',
}
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"
    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, use_conv_embed=False, hidden_sizes=[192, 384, 768, 768], depths=[2, 2, 6, 2], focal_levels=[2, 2, 2, 2], focal_windows=[3, 3, 3, 3], hidden_act="gelu", mlp_ratio=4.0, hidden_dropout_prob=0.0, drop_path_rate=0.1, use_layerscale=False, layerscale_value=1E-4, use_post_layernorm=False, use_post_layernorm_in_modulation=False, normalize_modulator=False, initializer_range=0.02, layer_norm_eps=1E-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs, ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["""stem"""] + [f'stage{idx}' for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
| 16
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : List[str] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : str = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Dict = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Optional[int] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : List[Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Optional[Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Tuple = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Tuple = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Any = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : str = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Optional[int] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : List[Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Optional[Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Optional[int] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Any = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Tuple = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : str = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Dict = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Optional[Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Dict = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : List[str] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : List[Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Optional[int] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Any = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : List[Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Optional[Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
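# --- Hedged illustration (added; not part of the original row) -------------
# The repeated classes above are transformers-style "dummy objects": importable
# placeholders that fail fast when the `sentencepiece` backend is missing. A
# minimal self-contained sketch of that pattern, with a hypothetical
# `requires_backends_demo` standing in for the real `requires_backends`:
def requires_backends_demo(obj, backends):
    missing = list(backends)  # pretend every listed backend is absent
    if missing:
        raise ImportError(f"{type(obj).__name__} requires the {missing} backend(s).")

class DummyTokenizer:
    def __init__(self, *args, **kwargs):
        requires_backends_demo(self, ["sentencepiece"])

try:
    DummyTokenizer()
except ImportError as err:
    print(err)  # DummyTokenizer requires the ['sentencepiece'] backend(s).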
| 16
| 1
|
"""simple docstring"""
import requests
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
UpperCAmelCase = {'Content-Type': 'application/json'}
UpperCAmelCase = requests.post(snake_case_ , json={'text': message_body} , headers=snake_case_ )
if response.status_code != 200:
UpperCAmelCase = (
'Request to slack returned an error '
F"""{response.status_code}, the response is:\n{response.text}"""
)
raise ValueError(snake_case_ )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
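# --- Hedged usage sketch (added; assumes the webhook expects {"text": ...}
# JSON, as in the function above). Builds the POST locally with
# `requests.Request(...).prepare()` and never touches the network, so it runs
# offline; the URL is hypothetical.
import json
import requests

prepared = requests.Request(
    "POST",
    "https://hooks.slack.com/services/EXAMPLE",  # hypothetical webhook URL
    json={"text": "hello"},
    headers={"Content-Type": "application/json"},
).prepare()
assert json.loads(prepared.body) == {"text": "hello"}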
| 78
|
from math import pi
def lowerCamelCase__ ( snake_case_ : int , snake_case_ : int ) -> float:
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
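# Quick worked check of the formula above (added): a 90-degree arc of a
# radius-10 circle is a quarter of the circumference, 2 * pi * 10 / 4 == 5 * pi
# (about 15.708).
from math import isclose, pi

assert isclose(2 * pi * 10 * (90 / 360), 5 * pi)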
| 24
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class lowerCAmelCase__ ( A_ ):
__a = """timesformer"""
def __init__( self : Tuple , _lowerCamelCase : Any=224 , _lowerCamelCase : str=16 , _lowerCamelCase : Tuple=3 , _lowerCamelCase : List[str]=8 , _lowerCamelCase : Union[str, Any]=768 , _lowerCamelCase : Dict=12 , _lowerCamelCase : List[Any]=12 , _lowerCamelCase : Optional[int]=3072 , _lowerCamelCase : str="gelu" , _lowerCamelCase : Union[str, Any]=0.0 , _lowerCamelCase : Dict=0.0 , _lowerCamelCase : str=0.0_2 , _lowerCamelCase : Any=1e-6 , _lowerCamelCase : Any=True , _lowerCamelCase : Tuple="divided_space_time" , _lowerCamelCase : Optional[Any]=0 , **_lowerCamelCase : List[Any] , ):
super().__init__(**_lowerCamelCase )
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = num_frames
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = initializer_range
_snake_case = layer_norm_eps
_snake_case = qkv_bias
_snake_case = attention_type
_snake_case = drop_path_rate
| 40
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
UpperCAmelCase__ = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
UpperCAmelCase__ = {
'allenai/led-base-16384': 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def _UpperCAmelCase ( ) -> Union[str, Any]:
_snake_case = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
_snake_case = bs[:]
_snake_case = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__lowerCamelCase )
cs.append(2**8 + n )
n += 1
_snake_case = [chr(__lowerCamelCase ) for n in cs]
return dict(zip(__lowerCamelCase , __lowerCamelCase ) )
def _UpperCAmelCase ( __lowerCamelCase : Any ) -> List[Any]:
_snake_case = set()
_snake_case = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_snake_case = char
return pairs
class lowerCAmelCase__ ( A_ ):
__a = VOCAB_FILES_NAMES
__a = PRETRAINED_VOCAB_FILES_MAP
__a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a = ["""input_ids""", """attention_mask"""]
def __init__( self : str , _lowerCamelCase : str , _lowerCamelCase : Dict , _lowerCamelCase : Optional[int]="replace" , _lowerCamelCase : Dict="<s>" , _lowerCamelCase : Optional[Any]="</s>" , _lowerCamelCase : Union[str, Any]="</s>" , _lowerCamelCase : str="<s>" , _lowerCamelCase : Union[str, Any]="<unk>" , _lowerCamelCase : Any="<pad>" , _lowerCamelCase : Union[str, Any]="<mask>" , _lowerCamelCase : Optional[int]=False , **_lowerCamelCase : str , ):
_snake_case = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else bos_token
_snake_case = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else eos_token
_snake_case = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else sep_token
_snake_case = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else cls_token
_snake_case = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else unk_token
_snake_case = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_snake_case = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else mask_token
super().__init__(
errors=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , add_prefix_space=_lowerCamelCase , **_lowerCamelCase , )
with open(_lowerCamelCase , encoding='''utf-8''' ) as vocab_handle:
_snake_case = json.load(_lowerCamelCase )
_snake_case = {v: k for k, v in self.encoder.items()}
_snake_case = errors # how to handle errors in decoding
_snake_case = bytes_to_unicode()
_snake_case = {v: k for k, v in self.byte_encoder.items()}
with open(_lowerCamelCase , encoding='''utf-8''' ) as merges_handle:
_snake_case = merges_handle.read().split('''\n''' )[1:-1]
_snake_case = [tuple(merge.split() ) for merge in bpe_merges]
_snake_case = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
_snake_case = {}
_snake_case = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_snake_case = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def lowercase ( self : Tuple ):
return len(self.encoder )
def lowercase ( self : str ):
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase ( self : Dict , _lowerCamelCase : str ):
if token in self.cache:
return self.cache[token]
_snake_case = tuple(_lowerCamelCase )
_snake_case = get_pairs(_lowerCamelCase )
if not pairs:
return token
while True:
_snake_case = min(_lowerCamelCase , key=lambda _lowerCamelCase : self.bpe_ranks.get(_lowerCamelCase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
_snake_case , _snake_case = bigram
_snake_case = []
_snake_case = 0
while i < len(_lowerCamelCase ):
try:
_snake_case = word.index(_lowerCamelCase , _lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_snake_case = j
if word[i] == first and i < len(_lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_snake_case = tuple(_lowerCamelCase )
_snake_case = new_word
if len(_lowerCamelCase ) == 1:
break
else:
_snake_case = get_pairs(_lowerCamelCase )
_snake_case = ''' '''.join(_lowerCamelCase )
_snake_case = word
return word
def lowercase ( self : str , _lowerCamelCase : Dict ):
_snake_case = []
for token in re.findall(self.pat , _lowerCamelCase ):
_snake_case = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowerCamelCase ).split(''' ''' ) )
return bpe_tokens
def lowercase ( self : Optional[Any] , _lowerCamelCase : List[str] ):
return self.encoder.get(_lowerCamelCase , self.encoder.get(self.unk_token ) )
def lowercase ( self : Optional[int] , _lowerCamelCase : Dict ):
return self.decoder.get(_lowerCamelCase )
def lowercase ( self : Dict , _lowerCamelCase : Union[str, Any] ):
_snake_case = ''''''.join(_lowerCamelCase )
_snake_case = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def lowercase ( self : str , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ):
if not os.path.isdir(_lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_snake_case = os.path.join(
_lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_snake_case = os.path.join(
_lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(_lowerCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowerCamelCase , ensure_ascii=_lowerCamelCase ) + '''\n''' )
_snake_case = 0
with open(_lowerCamelCase , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _lowerCamelCase : _lowerCamelCase[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
_snake_case = token_index
writer.write(''' '''.join(bpe_tokens ) + '''\n''' )
index += 1
return vocab_file, merge_file
def lowercase ( self : str , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_snake_case = [self.cls_token_id]
_snake_case = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase ( self : Tuple , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None , _lowerCamelCase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(_lowerCamelCase )) + [1]
return [1] + ([0] * len(_lowerCamelCase )) + [1, 1] + ([0] * len(_lowerCamelCase )) + [1]
def lowercase ( self : Optional[int] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ):
_snake_case = [self.sep_token_id]
_snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase ( self : Any , _lowerCamelCase : int , _lowerCamelCase : Any=False , **_lowerCamelCase : List[Any] ):
_snake_case = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_lowerCamelCase ) > 0 and not text[0].isspace()):
_snake_case = ''' ''' + text
return (text, kwargs)
def lowercase ( self : int , _lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : Optional[bool] = None , ):
_snake_case = super()._pad(
encoded_inputs=_lowerCamelCase , max_length=_lowerCamelCase , padding_strategy=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
# Load from model defaults
if return_attention_mask is None:
_snake_case = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
_snake_case = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` needs to have the same length as other (sequential) inputs.
_snake_case = len(encoded_inputs['''global_attention_mask'''] ) != len(_lowerCamelCase )
if needs_to_be_padded:
_snake_case = len(_lowerCamelCase ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
_snake_case = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
_snake_case = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
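# --- Self-contained sketch (added; assumption: mirrors the GPT-2/BART byte-BPE
# machinery used by the tokenizer above). Shows the reversible byte<->unicode
# map and the adjacent-pair extraction that drive the merge loop, free of the
# masked names in the class.
def demo_bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    return dict(zip(bs, [chr(c) for c in cs]))

byte_map = demo_bytes_to_unicode()
assert len(byte_map) == 256 and byte_map[ord("A")] == "A"

word = tuple("hello")
pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
assert ("l", "l") in pairs  # candidate bigrams for the next BPE merge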
| 40
| 1
|
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Any:
a_ = dataset
a_ = process
a_ = params
def __len__( self) ->Union[str, Any]:
return len(self.dataset)
def __getitem__( self , __UpperCAmelCase) ->Tuple:
a_ = self.dataset[i]
a_ = self.process(__UpperCAmelCase , **self.params)
return processed
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None) ->Optional[int]:
a_ = loader
a_ = infer
a_ = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
a_ = None
a_ = loader_batch_size
# Internal bookkeeping
a_ = None
a_ = None
def __len__( self) ->Dict:
return len(self.loader)
def __iter__( self) ->Dict:
a_ = iter(self.loader)
return self
def UpperCAmelCase__ ( self) ->int:
if isinstance(self._loader_batch_data , torch.Tensor):
# Batch data is simple tensor, just fetch the slice
a_ = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
a_ = {}
for k, element in self._loader_batch_data.items():
if isinstance(__UpperCAmelCase , __UpperCAmelCase):
# Convert ModelOutput to tuple first
a_ = element.to_tuple()
if isinstance(element[0] , torch.Tensor):
a_ = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
elif isinstance(element[0] , np.ndarray):
a_ = tuple(np.expand_dims(el[self._loader_batch_index] , 0) for el in element)
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(__UpperCAmelCase , __UpperCAmelCase):
# Those are stored as lists of tensors, so they need specific unbatching.
if isinstance(element[0] , torch.Tensor):
a_ = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
elif isinstance(element[0] , np.ndarray):
a_ = tuple(np.expand_dims(el[self._loader_batch_index] , 0) for el in element)
continue
if element is None:
# This can happen for optional data that get passed around
a_ = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor):
# Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
a_ = element[self._loader_batch_index].unsqueeze(0)
elif isinstance(element[self._loader_batch_index] , np.ndarray):
# Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
a_ = np.expand_dims(element[self._loader_batch_index] , 0)
else:
# This is typically a list, so no need to `unsqueeze`.
a_ = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
a_ = self._loader_batch_data.__class__(__UpperCAmelCase)
self._loader_batch_index += 1
return result
def UpperCAmelCase__ ( self) ->str:
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
a_ = next(self.iterator)
a_ = self.infer(__UpperCAmelCase , **self.params)
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(__UpperCAmelCase , torch.Tensor):
a_ = processed
else:
a_ = list(processed.keys())[0]
a_ = processed[key]
if isinstance(__UpperCAmelCase , __UpperCAmelCase):
a_ = len(__UpperCAmelCase)
else:
a_ = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
a_ = observed_batch_size
# Setting internal index to unwrap the batch
a_ = processed
a_ = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None) ->Any:
super().__init__(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
def __iter__( self) ->Optional[int]:
a_ = iter(self.loader)
a_ = None
return self
def UpperCAmelCase__ ( self) ->List[str]:
if self.subiterator is None:
a_ = self.infer(next(self.iterator) , **self.params)
try:
# Try to return next item
a_ = next(self.subiterator)
except StopIteration:
# When a preprocess iterator ends, we can start looking at the next item
# ChunkIterator will keep feeding until ALL elements of iterator
# all have created their subiterator and have been iterating against.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
a_ = self.infer(next(self.iterator) , **self.params)
a_ = next(self.subiterator)
return processed
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def __iter__( self) ->Union[str, Any]:
a_ = iter(self.loader)
return self
def UpperCAmelCase__ ( self) ->Optional[int]:
# Extremely similar to PipelineIterator in its unpacking mechanism
# BUT, we have an extra required item, which is the presence of `is_last`.
# That is because everything is flattened by `PipelineChunkIterator`, so we
# need to keep track of how to regroup here into the original `process`
# boundaries so that `process` and `postprocess` see the same data.
# This iterator accumulates items (possibly while unbatching) until it
# hits an `is_last` and then just passes it on to the caller.
a_ = False
a_ = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
a_ = self.loader_batch_item()
a_ = item.pop("is_last")
accumulator.append(__UpperCAmelCase)
if is_last:
return accumulator
while not is_last:
a_ = self.infer(next(self.iterator) , **self.params)
if self.loader_batch_size is not None:
if isinstance(__UpperCAmelCase , torch.Tensor):
a_ = processed
else:
a_ = list(processed.keys())[0]
a_ = processed[key]
if isinstance(__UpperCAmelCase , __UpperCAmelCase):
a_ = len(__UpperCAmelCase)
else:
a_ = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
a_ = observed_batch_size
a_ = processed
a_ = 0
while self._loader_batch_index < self.loader_batch_size:
a_ = self.loader_batch_item()
a_ = item.pop("is_last")
accumulator.append(__UpperCAmelCase)
if is_last:
return accumulator
else:
a_ = processed
a_ = item.pop("is_last")
accumulator.append(__UpperCAmelCase)
return accumulator
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase) ->Dict:
a_ = dataset
a_ = key
def __len__( self) ->Tuple:
return len(self.dataset)
def __getitem__( self , __UpperCAmelCase) ->int:
return self.dataset[i][self.key]
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Union[str, Any]:
a_ = dataset
a_ = keya
a_ = keya
def __len__( self) ->Any:
return len(self.dataset)
def __getitem__( self , __UpperCAmelCase) ->int:
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 243
|
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
@register_to_config
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = False , ) ->int:
super().__init__()
a_ = nn.Embedding(__UpperCAmelCase , __UpperCAmelCase)
a_ = nn.Embedding(__UpperCAmelCase , __UpperCAmelCase)
a_ = False
a_ = nn.Dropout(p=__UpperCAmelCase)
a_ = TaConfig(
vocab_size=__UpperCAmelCase , d_model=__UpperCAmelCase , num_heads=__UpperCAmelCase , d_kv=__UpperCAmelCase , d_ff=__UpperCAmelCase , dropout_rate=__UpperCAmelCase , feed_forward_proj=__UpperCAmelCase , is_decoder=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , )
a_ = nn.ModuleList()
for lyr_num in range(__UpperCAmelCase):
a_ = TaBlock(__UpperCAmelCase)
self.encoders.append(__UpperCAmelCase)
a_ = TaLayerNorm(__UpperCAmelCase)
a_ = nn.Dropout(p=__UpperCAmelCase)
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase) ->Union[str, Any]:
a_ = self.token_embedder(__UpperCAmelCase)
a_ = encoder_input_tokens.shape[1]
a_ = torch.arange(__UpperCAmelCase , device=encoder_input_tokens.device)
x += self.position_encoding(__UpperCAmelCase)
a_ = self.dropout_pre(__UpperCAmelCase)
# invert the attention mask
a_ = encoder_input_tokens.size()
a_ = self.get_extended_attention_mask(__UpperCAmelCase , __UpperCAmelCase)
for lyr in self.encoders:
a_ = lyr(__UpperCAmelCase , __UpperCAmelCase)[0]
a_ = self.layer_norm(__UpperCAmelCase)
return self.dropout_post(__UpperCAmelCase), encoder_inputs_mask
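# --- Minimal sketch (added; assumption: it mirrors the encoder's token +
# learned position embedding sum above) using plain torch, independent of the
# class and its masked names.
import torch
import torch.nn as nn

tok_emb = nn.Embedding(10, 4)   # vocab_size=10, d_model=4 (toy sizes)
pos_emb = nn.Embedding(8, 4)    # max positions=8

ids = torch.tensor([[1, 2, 3]])                        # (batch=1, seq=3)
x = tok_emb(ids) + pos_emb(torch.arange(ids.shape[1]))  # broadcast over batch
assert x.shape == (1, 3, 4)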
| 243
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __A :
def __init__( self , a__ , a__=13 , a__=32 , a__=3 , a__=4 , a__=[10, 20, 30, 40] , a__=[2, 2, 3, 2] , a__=True , a__=True , a__=37 , a__="gelu" , a__=10 , a__=0.0_2 , a__=["stage2", "stage3", "stage4"] , a__=[2, 3, 4] , a__=None , ):
_lowerCAmelCase : List[Any] = parent
_lowerCAmelCase : str = batch_size
_lowerCAmelCase : Tuple = image_size
_lowerCAmelCase : Dict = num_channels
_lowerCAmelCase : Optional[Any] = num_stages
_lowerCAmelCase : int = hidden_sizes
_lowerCAmelCase : Optional[Any] = depths
_lowerCAmelCase : Any = is_training
_lowerCAmelCase : List[Any] = use_labels
_lowerCAmelCase : Tuple = intermediate_size
_lowerCAmelCase : Optional[Any] = hidden_act
_lowerCAmelCase : Optional[Any] = num_labels
_lowerCAmelCase : Optional[int] = initializer_range
_lowerCAmelCase : Optional[int] = out_features
_lowerCAmelCase : Tuple = out_indices
_lowerCAmelCase : str = scope
def __A ( self ):
_lowerCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[int] = None
if self.use_labels:
_lowerCAmelCase : str = ids_tensor([self.batch_size] , self.num_labels )
_lowerCAmelCase : Tuple = self.get_config()
return config, pixel_values, labels
def __A ( self ):
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=a__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : int = ConvNextVaModel(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : str = model(a__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : Optional[int] = ConvNextVaForImageClassification(a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[int] = model(a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : Any = ConvNextVaBackbone(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Tuple = model(a__ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_lowerCAmelCase : List[str] = None
_lowerCAmelCase : int = ConvNextVaBackbone(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[int] = model(a__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = config_and_inputs
_lowerCAmelCase : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
def __A ( self ):
_lowerCAmelCase : int = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = config_and_inputs
_lowerCAmelCase : int = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
_UpperCamelCase : Optional[Any] = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
_UpperCamelCase : Optional[int] = False
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : Optional[int] = False
_UpperCamelCase : Union[str, Any] = False
_UpperCamelCase : List[Any] = False
def __A ( self ):
_lowerCAmelCase : Optional[Any] = ConvNextVaModelTester(self )
_lowerCAmelCase : List[Any] = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37 )
def __A ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __A ( self ):
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def __A ( self ):
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def __A ( self ):
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def __A ( self ):
pass
def __A ( self ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_lowerCAmelCase , _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_with_labels()
_lowerCAmelCase : Tuple = True
if model_class.__name__ in [
*get_values(a__ ),
*get_values(a__ ),
]:
continue
_lowerCAmelCase : Optional[Any] = model_class(a__ )
model.to(a__ )
model.train()
_lowerCAmelCase : str = self._prepare_for_class(a__ , a__ , return_labels=a__ )
_lowerCAmelCase : Optional[int] = model(**a__ ).loss
loss.backward()
def __A ( self ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
_lowerCAmelCase : int = False
_lowerCAmelCase : List[Any] = True
if (
model_class.__name__
in [*get_values(a__ ), *get_values(a__ )]
or not model_class.supports_gradient_checkpointing
):
continue
_lowerCAmelCase : Tuple = model_class(a__ )
model.to(a__ )
model.gradient_checkpointing_enable()
model.train()
_lowerCAmelCase : Dict = self._prepare_for_class(a__ , a__ , return_labels=a__ )
_lowerCAmelCase : Optional[int] = model(**a__ ).loss
loss.backward()
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Tuple = model_class(a__ )
_lowerCAmelCase : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Union[str, Any] = [*signature.parameters.keys()]
_lowerCAmelCase : Dict = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , a__ )
def __A ( self ):
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def __A ( self ):
def check_hidden_states_output(a__ , a__ , a__ ):
_lowerCAmelCase : Tuple = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
_lowerCAmelCase : Tuple = model(**self._prepare_for_class(a__ , a__ ) )
_lowerCAmelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCAmelCase : Tuple = self.model_tester.num_stages
self.assertEqual(len(a__ ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : int = True
check_hidden_states_output(a__ , a__ , a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : List[str] = True
check_hidden_states_output(a__ , a__ , a__ )
def __A ( self ):
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
@slow
def __A ( self ):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Dict = ConvNextVaModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def SCREAMING_SNAKE_CASE ( ) -> Any:
_lowerCAmelCase : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __A ( unittest.TestCase ):
@cached_property
def __A ( self ):
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def __A ( self ):
_lowerCAmelCase : str = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(a__ )
_lowerCAmelCase : int = self.default_image_processor
_lowerCAmelCase : Dict = prepare_img()
_lowerCAmelCase : Dict = preprocessor(images=a__ , return_tensors="""pt""" ).to(a__ )
# forward pass
with torch.no_grad():
_lowerCAmelCase : int = model(**a__ )
# verify the logits
_lowerCAmelCase : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , a__ )
_lowerCAmelCase : int = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(a__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1e-4 ) )
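# Hedged arithmetic check (added) of the `image_size // 32` shapes asserted in
# the tests above. Assumption: ConvNeXt-style striding, i.e. a stride-4 stem
# followed by three stride-2 downsampling stages, for a total stride of 32.
stem_stride, num_downsamples = 4, 3
total_stride = stem_stride * 2**num_downsamples
assert total_stride == 32 and 32 // total_stride == 1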
| 126
|
"""simple docstring"""
from manim import *
class __A ( SCREAMING_SNAKE_CASE_ ):
def __A ( self ):
_lowerCAmelCase : Any = Rectangle(height=0.5 , width=0.5 )
_lowerCAmelCase : List[Any] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
_lowerCAmelCase : List[str] = [mem.copy() for i in range(6 )]
_lowerCAmelCase : Any = [mem.copy() for i in range(6 )]
_lowerCAmelCase : Optional[int] = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCAmelCase : Tuple = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCAmelCase : Optional[Any] = VGroup(a__ , a__ ).arrange(a__ , buff=0 )
_lowerCAmelCase : Dict = Text("""CPU""" , font_size=24 )
_lowerCAmelCase : str = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(a__ )
_lowerCAmelCase : Dict = [mem.copy() for i in range(4 )]
_lowerCAmelCase : Any = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCAmelCase : Tuple = Text("""GPU""" , font_size=24 )
_lowerCAmelCase : Optional[int] = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
gpu.move_to([-1, -1, 0] )
self.add(a__ )
_lowerCAmelCase : Optional[int] = [mem.copy() for i in range(6 )]
_lowerCAmelCase : Any = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCAmelCase : List[str] = Text("""Model""" , font_size=24 )
_lowerCAmelCase : Any = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
model.move_to([3, -1.0, 0] )
self.add(a__ )
_lowerCAmelCase : Tuple = []
for i, rect in enumerate(a__ ):
rect.set_stroke(a__ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_lowerCAmelCase : List[str] = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(a__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=a__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=a__ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=a__ , buff=0.0 )
self.add(a__ )
cpu_targs.append(a__ )
_lowerCAmelCase : Any = [mem.copy() for i in range(6 )]
_lowerCAmelCase : List[str] = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCAmelCase : int = Text("""Loaded Checkpoint""" , font_size=24 )
_lowerCAmelCase : Optional[int] = Group(a__ , a__ ).arrange(a__ , aligned_edge=a__ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_lowerCAmelCase : List[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowerCAmelCase : List[str] = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(a__ , a__ )
_lowerCAmelCase : int = MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(a__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_lowerCAmelCase : List[Any] = MarkupText(
F"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(a__ ) , Write(a__ ) )
self.play(Write(a__ , run_time=1 ) , Create(a__ , run_time=1 ) )
_lowerCAmelCase : int = []
_lowerCAmelCase : List[Any] = []
for i, rect in enumerate(a__ ):
_lowerCAmelCase : Tuple = fill.copy().set_fill(a__ , opacity=0.7 )
target.move_to(a__ )
first_animations.append(GrowFromCenter(a__ , run_time=1 ) )
_lowerCAmelCase : Optional[Any] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(a__ , run_time=1.5 ) )
self.play(*a__ )
self.play(*a__ )
self.wait()
| 126
| 1
|
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
__A = "."
if __name__ == "__main__":
__A = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
__A = []
__A = []
with open(doctest_file_path) as fp:
for line in fp:
__A = line.strip()
__A = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
__A = "\n".join(non_existent_paths)
raise ValueError(f'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}')
if all_paths != sorted(all_paths):
raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 10
|
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
_UpperCAmelCase : Any =logging.get_logger(__name__)
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , *__lowercase , **__lowercase ) -> None:
warnings.warn(
'''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use ChineseCLIPImageProcessor instead.''' , __lowercase , )
super().__init__(*__lowercase , **__lowercase )
| 262
| 0
|
'''simple docstring'''
def snake_case_ ( __SCREAMING_SNAKE_CASE : dict ):
"""simple docstring"""
lowercase_ : Optional[int] = set()
# edges = list of graph's edges
lowercase_ : Union[str, Any] = get_edges(__SCREAMING_SNAKE_CASE )
# While there are still elements in edges list, take an arbitrary edge
# (from_node, to_node) and add his extremity to chosen_vertices and then
# remove all arcs adjacent to the from_node and to_node
while edges:
lowercase_ , lowercase_ : Dict = edges.pop()
chosen_vertices.add(__SCREAMING_SNAKE_CASE )
chosen_vertices.add(__SCREAMING_SNAKE_CASE )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(__SCREAMING_SNAKE_CASE )
return chosen_vertices
def snake_case_ ( __SCREAMING_SNAKE_CASE : dict ):
"""simple docstring"""
lowercase_ : str = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 264
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase : List[Any] = logging.get_logger(__name__)
_lowercase : Optional[Any] = {
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = '''vit'''
def __init__( self , __SCREAMING_SNAKE_CASE=7_68 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=30_72 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1E-1_2 , __SCREAMING_SNAKE_CASE=2_24 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=16 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = hidden_size
lowercase_ : Dict = num_hidden_layers
lowercase_ : List[Any] = num_attention_heads
lowercase_ : Any = intermediate_size
lowercase_ : Union[str, Any] = hidden_act
lowercase_ : Dict = hidden_dropout_prob
lowercase_ : List[str] = attention_probs_dropout_prob
lowercase_ : Any = initializer_range
lowercase_ : Tuple = layer_norm_eps
lowercase_ : Union[str, Any] = image_size
lowercase_ : Tuple = patch_size
lowercase_ : Tuple = num_channels
lowercase_ : Union[str, Any] = qkv_bias
lowercase_ : List[Any] = encoder_stride
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = version.parse('''1.11''' )
@property
def _snake_case ( self ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _snake_case ( self ):
"""simple docstring"""
return 1E-4
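# Hedged illustration (added) of what the dynamic-axes property above returns:
# a single `pixel_values` input whose four axes are all marked dynamic.
from collections import OrderedDict

onnx_inputs = OrderedDict(
    [("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"})]
)
assert list(onnx_inputs) == ["pixel_values"]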
| 264
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'microsoft/focalnet-tiny': 'https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json',
}
class __A ( A_ ,A_ ):
'''simple docstring'''
lowerCAmelCase : Any = "focalnet"
def __init__( self : Any ,_snake_case : Optional[int]=224 ,_snake_case : Tuple=4 ,_snake_case : Optional[Any]=3 ,_snake_case : Optional[Any]=96 ,_snake_case : List[Any]=False ,_snake_case : Optional[int]=[192, 384, 768, 768] ,_snake_case : Optional[Any]=[2, 2, 6, 2] ,_snake_case : Optional[int]=[2, 2, 2, 2] ,_snake_case : List[str]=[3, 3, 3, 3] ,_snake_case : List[str]="gelu" ,_snake_case : Dict=4.0 ,_snake_case : str=0.0 ,_snake_case : Any=0.1 ,_snake_case : Dict=False ,_snake_case : List[Any]=1e-4 ,_snake_case : Dict=False ,_snake_case : Optional[int]=False ,_snake_case : Optional[Any]=False ,_snake_case : Dict=0.02 ,_snake_case : Optional[int]=1e-5 ,_snake_case : Optional[int]=32 ,_snake_case : Any=None ,_snake_case : List[Any]=None ,**_snake_case : List[Any] ,) -> Tuple:
"""simple docstring"""
super().__init__(**_snake_case )
lowercase__ : Optional[Any] = image_size
lowercase__ : str = patch_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : Dict = embed_dim
lowercase__ : List[str] = use_conv_embed
lowercase__ : Optional[Any] = hidden_sizes
lowercase__ : Dict = depths
lowercase__ : List[Any] = focal_levels
lowercase__ : Tuple = focal_windows
lowercase__ : str = hidden_act
lowercase__ : Tuple = mlp_ratio
lowercase__ : Tuple = hidden_dropout_prob
lowercase__ : str = drop_path_rate
lowercase__ : str = use_layerscale
lowercase__ : List[Any] = layerscale_value
lowercase__ : Optional[Any] = use_post_layernorm
lowercase__ : str = use_post_layernorm_in_modulation
lowercase__ : Dict = normalize_modulator
lowercase__ : Dict = initializer_range
lowercase__ : Union[str, Any] = layer_norm_eps
lowercase__ : Any = encoder_stride
lowercase__ : int = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 ,len(self.depths ) + 1 )]
lowercase__ , lowercase__ : Tuple = get_aligned_output_features_output_indices(
out_features=_snake_case ,out_indices=_snake_case ,stage_names=self.stage_names )
| 16
|
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class __A ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
debug_launcher(test_script.main )
def UpperCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
debug_launcher(test_ops.main )
| 16
| 1
|
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def UpperCamelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
A__ = FlaxDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''',safety_checker=__lowerCamelCase,cache_dir=__lowerCamelCase )
A__ = [t[-1] for t in os.walk(os.path.join(__lowerCamelCase,os.listdir(__lowerCamelCase )[0],'''snapshots''' ) )]
A__ = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('''.bin''' ) for f in files )
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def UpperCamelCase ( self ):
A__ , A__ = FlaxStableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''',safety_checker=__lowerCamelCase )
A__ = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
A__ = jax.random.PRNGKey(0 )
A__ = 4
A__ = jax.device_count()
A__ = num_samples * [prompt]
A__ = pipeline.prepare_inputs(__lowerCamelCase )
# shard inputs and rng
A__ = replicate(__lowerCamelCase )
A__ = jax.random.split(__lowerCamelCase,__lowerCamelCase )
A__ = shard(__lowerCamelCase )
A__ = pipeline(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,jit=__lowerCamelCase ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:],dtype=np.floataa ).sum() - 4.1514745 ) < 1E-3
assert np.abs(np.abs(__lowerCamelCase,dtype=np.floataa ).sum() - 49947.875 ) < 5E-1
A__ = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(__lowerCamelCase ) == num_samples
def UpperCamelCase ( self ):
A__ , A__ = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''',revision='''flax''',safety_checker=__lowerCamelCase )
A__ = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
A__ = jax.random.PRNGKey(0 )
A__ = 50
A__ = jax.device_count()
A__ = num_samples * [prompt]
A__ = pipeline.prepare_inputs(__lowerCamelCase )
# shard inputs and rng
A__ = replicate(__lowerCamelCase )
A__ = jax.random.split(__lowerCamelCase,__lowerCamelCase )
A__ = shard(__lowerCamelCase )
A__ = pipeline(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,jit=__lowerCamelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:],dtype=np.floataa ).sum() - 0.05652401) ) < 1E-3
assert np.abs((np.abs(__lowerCamelCase,dtype=np.floataa ).sum() - 2383808.2) ) < 5E-1
def UpperCamelCase ( self ):
A__ , A__ = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''',revision='''bf16''',dtype=jnp.bfloataa,safety_checker=__lowerCamelCase )
A__ = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
A__ = jax.random.PRNGKey(0 )
A__ = 50
A__ = jax.device_count()
A__ = num_samples * [prompt]
A__ = pipeline.prepare_inputs(__lowerCamelCase )
# shard inputs and rng
A__ = replicate(__lowerCamelCase )
A__ = jax.random.split(__lowerCamelCase,__lowerCamelCase )
A__ = shard(__lowerCamelCase )
A__ = pipeline(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,jit=__lowerCamelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:],dtype=np.floataa ).sum() - 0.04003906) ) < 1E-3
assert np.abs((np.abs(__lowerCamelCase,dtype=np.floataa ).sum() - 2373516.75) ) < 5E-1
def UpperCamelCase ( self ):
A__ , A__ = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''',revision='''bf16''',dtype=jnp.bfloataa )
A__ = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
A__ = jax.random.PRNGKey(0 )
A__ = 50
A__ = jax.device_count()
A__ = num_samples * [prompt]
A__ = pipeline.prepare_inputs(__lowerCamelCase )
# shard inputs and rng
A__ = replicate(__lowerCamelCase )
A__ = jax.random.split(__lowerCamelCase,__lowerCamelCase )
A__ = shard(__lowerCamelCase )
A__ = pipeline(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,jit=__lowerCamelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:],dtype=np.floataa ).sum() - 0.04003906) ) < 1E-3
assert np.abs((np.abs(__lowerCamelCase,dtype=np.floataa ).sum() - 2373516.75) ) < 5E-1
def UpperCamelCase ( self ):
A__ = FlaxDDIMScheduler(
beta_start=0.00085,beta_end=0.012,beta_schedule='''scaled_linear''',set_alpha_to_one=__lowerCamelCase,steps_offset=1,)
A__ , A__ = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''',revision='''bf16''',dtype=jnp.bfloataa,scheduler=__lowerCamelCase,safety_checker=__lowerCamelCase,)
A__ = scheduler.create_state()
A__ = scheduler_state
A__ = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
A__ = jax.random.PRNGKey(0 )
A__ = 50
A__ = jax.device_count()
A__ = num_samples * [prompt]
A__ = pipeline.prepare_inputs(__lowerCamelCase )
# shard inputs and rng
A__ = replicate(__lowerCamelCase )
A__ = jax.random.split(__lowerCamelCase,__lowerCamelCase )
A__ = shard(__lowerCamelCase )
A__ = pipeline(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,jit=__lowerCamelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:],dtype=np.floataa ).sum() - 0.045043945) ) < 1E-3
assert np.abs((np.abs(__lowerCamelCase,dtype=np.floataa ).sum() - 2347693.5) ) < 5E-1
def UpperCamelCase ( self ):
A__ = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
A__ = jax.device_count()
A__ = num_samples * [prompt]
A__ = jax.random.split(jax.random.PRNGKey(0 ),__lowerCamelCase )
A__ , A__ = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''',revision='''bf16''',dtype=jnp.bfloataa,safety_checker=__lowerCamelCase,)
A__ = replicate(__lowerCamelCase )
A__ = pipeline.prepare_inputs(__lowerCamelCase )
A__ = shard(__lowerCamelCase )
A__ = pipeline(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,jit=__lowerCamelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
A__ = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
A__ , A__ = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''',revision='''bf16''',dtype=jnp.bfloataa,safety_checker=__lowerCamelCase,use_memory_efficient_attention=__lowerCamelCase,)
A__ = replicate(__lowerCamelCase )
A__ = pipeline.prepare_inputs(__lowerCamelCase )
A__ = shard(__lowerCamelCase )
A__ = pipeline(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,jit=__lowerCamelCase ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
A__ = images_eff[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
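# --- Hedged sketch (added) of the replicate/shard data-parallel pattern the
# tests above rely on. Assumption: standard flax.jax_utils usage on a single
# host; the leading axis becomes the local device count.
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

n = jax.local_device_count()
params = {"w": jnp.ones((2,))}
batch = jnp.arange(n * 3).reshape(n, 3)  # one example per device

replicated = replicate(params)  # adds a leading device axis to every leaf
sharded = shard(batch)          # splits the batch across devices
assert replicated["w"].shape == (n, 2) and sharded.shape == (n, 1, 3)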
| 363
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = ['''image_processor''', '''tokenizer''']
__SCREAMING_SNAKE_CASE = '''Pix2StructImageProcessor'''
__SCREAMING_SNAKE_CASE = ('''T5Tokenizer''', '''T5TokenizerFast''')
def __init__( self,__lowerCamelCase,__lowerCamelCase ):
A__ = False
super().__init__(__lowerCamelCase,__lowerCamelCase )
def __call__( self,__lowerCamelCase=None,__lowerCamelCase = None,__lowerCamelCase = True,__lowerCamelCase = False,__lowerCamelCase = None,__lowerCamelCase = None,__lowerCamelCase = 2048,__lowerCamelCase = 0,__lowerCamelCase = None,__lowerCamelCase = None,__lowerCamelCase = False,__lowerCamelCase = False,__lowerCamelCase = False,__lowerCamelCase = False,__lowerCamelCase = False,__lowerCamelCase = True,__lowerCamelCase = None,**__lowerCamelCase,):
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None and not self.image_processor.is_vqa:
A__ = self.tokenizer
A__ = self.tokenizer(
text=__lowerCamelCase,add_special_tokens=__lowerCamelCase,padding=__lowerCamelCase,truncation=__lowerCamelCase,max_length=__lowerCamelCase,stride=__lowerCamelCase,pad_to_multiple_of=__lowerCamelCase,return_attention_mask=__lowerCamelCase,return_overflowing_tokens=__lowerCamelCase,return_special_tokens_mask=__lowerCamelCase,return_offsets_mapping=__lowerCamelCase,return_token_type_ids=__lowerCamelCase,return_length=__lowerCamelCase,verbose=__lowerCamelCase,return_tensors=__lowerCamelCase,**__lowerCamelCase,)
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
A__ = self.image_processor(
__lowerCamelCase,return_tensors=__lowerCamelCase,max_patches=__lowerCamelCase,**__lowerCamelCase )
else:
# add pixel_values and bbox
A__ = self.image_processor(
__lowerCamelCase,return_tensors=__lowerCamelCase,max_patches=__lowerCamelCase,header_text=__lowerCamelCase,**__lowerCamelCase )
if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None
if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
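
# Illustrative usage sketch (not part of the original file; the checkpoint name and the
# exact output keys are assumptions for demonstration only):
#
#   processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
#   inputs = processor(images=image, text="A caption", return_tensors="pt")
#   # expected keys: "flattened_patches", "attention_mask", and, when text is given,
#   # "decoder_input_ids" / "decoder_attention_mask" (renamed by the pops above)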
| 39
| 0
|
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str
    words: List[str]
    labels: Optional[List[str]]
@dataclass
class InputFeatures:
    """A single set of features of data; field names match the corresponding model inputs."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir: str, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError
@staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}
        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))
            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)
                # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when tokenizing just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
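            # Worked example (illustrative, not from the original file): with
            # example.words = ["Hugging", "Face"], labels = ["B-ORG", "I-ORG"], and a
            # WordPiece split ["Hug", "##ging", "Face"], `tokens` becomes that split and
            # `label_ids` becomes [label_map["B-ORG"], pad_token_label_id, label_map["I-ORG"]]:
            # only the first sub-token of each word keeps a real label.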
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" , example.guid)
logger.info("tokens: %s" , " ".join([str(__UpperCAmelCase) for x in tokens]))
logger.info("input_ids: %s" , " ".join([str(__UpperCAmelCase) for x in input_ids]))
logger.info("input_mask: %s" , " ".join([str(__UpperCAmelCase) for x in input_mask]))
logger.info("segment_ids: %s" , " ".join([str(__UpperCAmelCase) for x in segment_ids]))
logger.info("label_ids: %s" , " ".join([str(__UpperCAmelCase) for x in label_ids]))
if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        """PyTorch dataset that builds and caches token-classification features."""

        features: List[InputFeatures]
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir, "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)
def __len__( self : int):
return len(self.features)
        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        """TensorFlow dataset wrapper around token-classification features."""

        features: List[InputFeatures]
        # Use -100 as padding label id so that only real label ids contribute to the loss later.
        pad_token_label_id: int = -100

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
{"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
tf.TensorShape([None]),
) , )
else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
{
"input_ids": tf.TensorShape([None]),
"attention_mask": tf.TensorShape([None]),
"token_type_ids": tf.TensorShape([None]),
},
tf.TensorShape([None]),
) , )
        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
return self.dataset
def __len__( self : Optional[Any]):
return len(self.features)
        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
| 40
|
"""simple docstring"""
def decimal_to_binary(num: int) -> str:
    """Convert an integer to its binary string representation, prefixed with "0b"."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False
    if num < 0:
        negative = True
        num = -num

    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40
| 1
|
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__UpperCAmelCase = "true"
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Returns everything needed to perform basic training."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        # gather_for_metrics drops the duplicated samples that were added to make the
        # last batch divisible across processes, so the totals match the dataset size.
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
                test_mrpc(dispatch_batches, split_batches)
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
            test_torch_metrics(accelerator, 99)
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''' )
        accelerator = Accelerator()
        test_torch_metrics(accelerator, 512)
accelerator.state._reset_state()
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 351
|
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)
# Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
'''\u0009''', # (horizontal tab, '\t')
'''\u000B''', # (vertical tab)
'''\u000C''', # (form feed)
'''\u0020''', # (space, ' ')
'''\u200E''', # (left-to-right mark):w
'''\u200F''', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
# Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
'''\u000A''', # (line feed, '\n')
'''\r\n''', # (carriage return and line feed, '\r\n')
'''\u000D''', # (carriage return, '\r')
'''\r''', # (carriage return, '\r')
'''\u000D''', # (carriage return, '\r')
'''\u2028''', # (line separator)
'''\u2029''', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )
@require_ftfy
    def test_tokenization_python_rust_equals(self):
super().test_tokenization_python_rust_equals()
    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
| 228
| 0
|
"""simple docstring"""
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    charge_product = abs(charge1 * charge2)

    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")
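
# Illustrative usage (not part of the original file): solve for the unknown force.
#   coulombs_law(force=0, charge1=3, charge2=5, distance=2000)
#   # -> {"force": 33705.0}   since 8.988e9 * |3 * 5| / 2000**2 = 33705.0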
if __name__ == "__main__":
import doctest
doctest.testmod()
| 126
|
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
lowerCAmelCase = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
lowerCAmelCase = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (`"warn"`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `"warn"`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
lowerCAmelCase = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Recall(datasets.Metric):
"""simple docstring"""
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'] , )
    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
| 126
| 1
|
'''simple docstring'''
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}
class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
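
# Illustrative usage sketch (assumed, not part of the original file):
#   config = NezhaConfig()
#   config.max_relative_position  # -> 64, the window used by NeZha's functional
#                                 #    relative positional encoding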
| 17
|
'''simple docstring'''
def binary_and(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
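
# Illustrative usage (not part of the original file):
#   binary_and(25, 32)  # -> "0b000000"  (011001 AND 100000, zero-filled to 6 bits)
#   binary_and(37, 50)  # -> "0b100000"  (100101 AND 110010)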
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17
| 1
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1_024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1_536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1_024, 2_048]
        auxiliary_in_channels = 1_024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
snake_case_ : Optional[int] = []
# fmt: off
# stem
rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias''') )
rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"backbone.stages.{i}.{j}.gamma", f"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter") )
rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.weight", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight") )
rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.bias", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias") )
rename_keys.append((f"backbone.stages.{i}.{j}.norm.weight", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight") )
rename_keys.append((f"backbone.stages.{i}.{j}.norm.bias", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias") )
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight") )
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias") )
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight") )
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias") )
if i > 0:
rename_keys.append((f"backbone.downsample_layers.{i}.0.weight", f"backbone.encoder.stages.{i}.downsampling_layer.0.weight") )
rename_keys.append((f"backbone.downsample_layers.{i}.0.bias", f"backbone.encoder.stages.{i}.downsampling_layer.0.bias") )
rename_keys.append((f"backbone.downsample_layers.{i}.1.weight", f"backbone.encoder.stages.{i}.downsampling_layer.1.weight") )
rename_keys.append((f"backbone.downsample_layers.{i}.1.bias", f"backbone.encoder.stages.{i}.downsampling_layer.1.bias") )
rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight") )
rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias") )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
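
# For example (illustrative, using one of the mappings built above):
#   rename_key(state_dict, "backbone.norm0.weight", "backbone.hidden_states_norms.stage1.weight")
# pops the tensor stored under the old mmsegmentation key and re-inserts it under
# the Transformers key, mutating the state dict in place.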
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
'''upernet-convnext-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth''',
'''upernet-convnext-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth''',
'''upernet-convnext-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth''',
'''upernet-convnext-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth''',
'''upernet-convnext-xlarge''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth''',
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)
# verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print('''Logits:''' , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path)
print(f"Saving processor to {pytorch_dump_folder_path}" )
        processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print(f"Pushing model and processor for {model_name} to hub" )
model.push_to_hub(f"openmmlab/{model_name}" )
processor.push_to_hub(f"openmmlab/{model_name}" )
if __name__ == "__main__":
lowercase__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[f'upernet-convnext-{size}' for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowercase__ : Tuple = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 264
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : int = logging.get_logger(__name__)
lowercase__ : List[Any] = {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )
    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 264
| 1
|
def solution(limit: int = 100_0000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
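
# Why this works (illustrative note, not part of the original file): write the
# arithmetic progression as x = y + d, z = y - d with y = first_term.  Then
# x**2 - y**2 - z**2 = y * (4d - y) = n, so d = (y + n / y) / 4, which is exactly
# what the inner loop computes; y > d keeps z positive, and y < 4d keeps n positive.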
if __name__ == "__main__":
print(F"""{solution() = }""")
| 201
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True  # attribute name assumed; the obfuscated source only kept the value
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_equivalence(self):
        skip_schedulers = [
"DDIMScheduler",
"DDPMScheduler",
"PNDMScheduler",
"HeunDiscreteScheduler",
"EulerAncestralDiscreteScheduler",
"KDPM2DiscreteScheduler",
"KDPM2AncestralDiscreteScheduler",
"DPMSolverSDEScheduler",
]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2

        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                continue
            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2
    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-2
| 201
| 1
|
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''sentencepiece''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )
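# Behavior sketch (illustrative, not part of the original file): with DummyObject as
# metaclass and the explicit requires_backends call above, instantiating or touching
# any of these placeholders raises an ImportError naming the missing `sentencepiece`
# backend, instead of failing at import time.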
| 121
|
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCamelCase ( snake_case__ , unittest.TestCase):
"""simple docstring"""
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
def UpperCamelCase ( self ):
"""simple docstring"""
super().setUp()
        vocab_tokens = [
'<unk>',
'[CLS]',
'[SEP]',
'want',
'unwanted',
'wa',
'un',
'running',
',',
'low',
'l',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def get_tokenizer( self , **kwargs ):
        """simple docstring"""
        kwargs['lower_case'] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        """simple docstring"""
        input_text = '<unk> UNwanted , running'
        output_text = '<unk> unwanted, running'
        return input_text, output_text
    def test_full_tokenizer( self ):
        """simple docstring"""
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=True )
        tokens = tokenizer.tokenize('<unk> UNwanted , running' )
        self.assertListEqual(tokens , ['<unk>', 'unwanted', ',', 'running'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [0, 4, 8, 7] )
    def test_full_tokenizer_lower( self ):
        """simple docstring"""
        tokenizer = TransfoXLTokenizer(lower_case=True )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
    def test_full_tokenizer_no_lower( self ):
        """simple docstring"""
        tokenizer = TransfoXLTokenizer(lower_case=False )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
    def test_full_tokenizer_moses_numbers( self ):
        """simple docstring"""
        tokenizer = TransfoXLTokenizer(lower_case=False )
        text_in = 'Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'
        tokens_target = [
'Hello',
'(',
'bracket',
')',
'and',
'side',
'@-@',
'scrolled',
'[',
'and',
']',
'Henry',
'\'s',
'$',
'5',
'@,@',
'000',
'with',
'3',
'@.@',
'34',
'm',
'.',
'What',
'\'s',
'up',
'!',
'?',
]
        self.assertListEqual(tokenizer.tokenize(text_in ) , tokens_target )
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_target ) , text_in )
    def test_move_added_token( self ):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer )
        tokenizer.add_tokens(['new1', 'new2'] )
        tokenizer.move_added_token('new1' , 1 )
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer ) , original_len + 2 )
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode('new1' ) , [1] )
        self.assertEqual(tokenizer.decode([1] ) , 'new1' )
| 39
| 0
|
import qiskit
def half_adder(bit1: int , bit2: int ) -> qiskit.result.counts.Counts:
    """simple docstring"""
    simulator = qiskit.Aer.get_backend('''aer_simulator''' )
    qc_ha = qiskit.QuantumCircuit(4 , 2 )
    # encode inputs in qubits 0 and 1
    if bit1 == 1:
        qc_ha.x(0 )
    if bit2 == 1:
        qc_ha.x(1 )
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0 , 2 )
    qc_ha.cx(1 , 2 )
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0 , 1 , 3 )
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2 , 0 )  # extract XOR value
    qc_ha.measure(3 , 1 )  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha , simulator , shots=1_000 )
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha )
if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f'''Half Adder Output Qubit Counts: {counts}''')
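# A minimal truth-table check (hypothetical helper, not part of the original script):
# the half adder writes sum to classical bit 0 and carry to classical bit 1, so each
# count key reads "carry sum" and a noiseless run is dominated by a single key.
def _check_half_adder_truth_table() -> None:
    expected = {(0, 0): "00", (0, 1): "01", (1, 0): "01", (1, 1): "10"}
    for (bit1, bit2), bitstring in expected.items():
        counts = half_adder(bit1, bit2)
        assert max(counts, key=counts.get) == bitstring, (bit1, bit2, counts)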
| 351
|
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
logger = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor( DeformableDetrImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            '''The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use DeformableDetrImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 278
| 0
|
"""simple docstring"""
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    '''simple docstring'''
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]
    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            """a""": 0.08497,
            """b""": 0.01492,
            """c""": 0.02202,
            """d""": 0.04253,
            """e""": 0.11162,
            """f""": 0.02228,
            """g""": 0.02015,
            """h""": 0.06094,
            """i""": 0.07546,
            """j""": 0.00153,
            """k""": 0.01292,
            """l""": 0.04025,
            """m""": 0.02406,
            """n""": 0.06749,
            """o""": 0.07507,
            """p""": 0.01929,
            """q""": 0.00095,
            """r""": 0.07587,
            """s""": 0.06327,
            """t""": 0.09356,
            """u""": 0.02758,
            """v""": 0.00978,
            """w""": 0.02560,
            """x""": 0.00150,
            """y""": 0.01994,
            """z""": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict
    if not case_sensitive:
        ciphertext = ciphertext.lower()
    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}
    # cycle through all of the shifts
    for shift in range(len(alphabet_letters ) ):
        decrypted_with_shift = """"""
        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower() ) - shift) % len(
                    alphabet_letters )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter
        chi_squared_statistic = 0.0
        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter )
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter )
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int ) -> tuple[float, str]:
        return chi_squared_statistic_values[key]
    most_likely_cipher: int = min(
        chi_squared_statistic_values , key=chi_squared_statistic_values_sorting_key , )
    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
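# Usage sketch (illustrative values: "khoor zruog" is assumed to be "hello world"
# Caesar-shifted by 3 within a-z). Very short ciphertexts can fool the chi-squared
# fit, so longer inputs give more reliable shift estimates.
if __name__ == "__main__":
    shift, chi_value, decoded = decrypt_caesar_with_chi_squared("khoor zruog")
    print(shift, chi_value, decoded)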
| 84
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = ShapEImg2ImgPipeline
    params = ['''image''']
    batch_params = ['''image''']
    required_optional_params = [
        '''num_images_per_prompt''',
        '''num_inference_steps''',
        '''generator''',
        '''latents''',
        '''guidance_scale''',
        '''frame_size''',
        '''output_type''',
        '''return_dict''',
    ]
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size( self ):
        '''simple docstring'''
        return 32

    @property
    def time_input_dim( self ):
        '''simple docstring'''
        return 32

    @property
    def time_embed_dim( self ):
        '''simple docstring'''
        return self.time_input_dim * 4

    @property
    def renderer_dim( self ):
        '''simple docstring'''
        return 8

    @property
    def dummy_image_encoder( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
        model = CLIPVisionModel(config )
        return model

    @property
    def dummy_image_processor( self ):
        '''simple docstring'''
        image_processor = CLIPImageProcessor(
            crop_size=224 , do_center_crop=True , do_normalize=True , do_resize=True , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
        return image_processor

    @property
    def dummy_prior( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model_kwargs = {
            """num_attention_heads""": 2,
            """attention_head_dim""": 16,
            """embedding_dim""": self.time_input_dim,
            """num_embeddings""": 32,
            """embedding_proj_dim""": self.text_embedder_hidden_size,
            """time_embed_dim""": self.time_embed_dim,
            """num_layers""": 1,
            """clip_embed_dim""": self.time_input_dim * 2,
            """additional_embeddings""": 0,
            """time_embed_act_fn""": """gelu""",
            """norm_in_type""": """layer""",
            """embedding_proj_norm_type""": """layer""",
            """encoder_hid_proj_type""": None,
            """added_emb_type""": None,
        }
        model = PriorTransformer(**model_kwargs )
        return model

    @property
    def dummy_renderer( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model_kwargs = {
            """param_shapes""": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            """d_latent""": self.time_input_dim,
            """d_hidden""": self.renderer_dim,
            """n_output""": 12,
            """background""": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs )
        return model

    def get_dummy_components( self ):
        '''simple docstring'''
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
        components = {
            """prior""": prior,
            """image_encoder""": image_encoder,
            """image_processor""": image_processor,
            """renderer""": renderer,
            """scheduler""": scheduler,
        }
        return components

    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        input_image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """image""": input_image,
            """generator""": generator,
            """num_inference_steps""": 1,
            """frame_size""": 32,
            """output_type""": """np""",
        }
        return inputs

    def test_shap_e( self ):
        '''simple docstring'''
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def test_inference_batch_consistent( self ):
        '''simple docstring'''
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )

    def test_inference_batch_single_identical( self ):
        '''simple docstring'''
        test_max_difference = torch_device == """cpu"""
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , )

    def test_num_images_per_prompt( self ):
        '''simple docstring'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device )
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs , num_images_per_prompt=num_images_per_prompt )[0]
        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ):
        '''simple docstring'''
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img( self ):
        '''simple docstring'''
        input_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" )
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/shap_e/test_shap_e_img2img_out.npy""" )
        pipe = ShapEImg2ImgPipeline.from_pretrained("""openai/shap-e-img2img""" )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        images = pipe(
            input_image , generator=generator , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images , expected_image )
| 228
| 0
|
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict ):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict ) == len(split_dict_yaml_list )
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list )
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    '''split_info''' , [SplitInfo(), SplitInfo(dataset_name=None ), SplitInfo(dataset_name='''my_dataset''' )] )
def test_split_dict_asdict_has_dataset_name(split_info ):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({'''train''': split_info} ) )
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 350
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_nllb'] = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_nllb_fast'] = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
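# Illustrative note (not part of the original file): with the lazy module in place,
# `import transformers.models.nllb` succeeds even when sentencepiece is missing, and
# `NllbTokenizer` is only resolved (or raises a helpful error) on first attribute access.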
| 33
| 0
|
"""simple docstring"""
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}
class NezhaConfig( PretrainedConfig ):
    """simple docstring"""

    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__( self, vocab_size=21_128, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, max_relative_position=64, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, classifier_dropout=0.1, pad_token_id=0, bos_token_id=2, eos_token_id=3, use_cache=True, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
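# Minimal usage sketch (the override values below are illustrative, not defaults):
if __name__ == "__main__":
    config = NezhaConfig(hidden_size=512, num_hidden_layers=8)
    print(config.model_type, config.hidden_size)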
| 17
|
"""simple docstring"""
def merge_sort(collection: list) -> list:
    '''simple docstring'''
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(*merge_sort(unsorted), sep=',')
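# Worked example (illustrative): each pass peels off the current min and max, so
# merge_sort([5, 1, 4, 2, 3]) builds start=[1, 2] and end=[5, 4] around the leftover
# [3]; reversing end gives [4, 5] and the result is [1, 2, 3, 4, 5].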
| 17
| 1
|
"""simple docstring"""
from __future__ import annotations
import queue
class TreeNode:
    def __init__( self , data ):
        self.data = data
        self.right = None
        self.left = None


def build_tree() -> TreeNode:
    print('\n********Press N to stop entering at any point of time********\n' )
    check = input('Enter the value of the root node: ' ).strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check ) )
    q.put(tree_node )
    while not q.empty():
        node_found = q.get()
        msg = F'Enter the left node of {node_found.data}: '
        check = input(msg ).strip().lower() or 'n'
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check ) )
        node_found.left = left_node
        q.put(left_node )
        msg = F'Enter the right node of {node_found.data}: '
        check = input(msg ).strip().lower() or 'n'
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check ) )
        node_found.right = right_node
        q.put(right_node )
    raise


def pre_order(node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    print(node.data , end=',' )
    pre_order(node.left )
    pre_order(node.right )


def in_order(node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    in_order(node.left )
    print(node.data , end=',' )
    in_order(node.right )


def post_order(node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    post_order(node.left )
    post_order(node.right )
    print(node.data , end=',' )


def level_order(node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data , end=',' )
        if node_dequeued.left:
            q.put(node_dequeued.left )
        if node_dequeued.right:
            q.put(node_dequeued.right )


def level_order_actual(node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data , end=',' )
            if node_dequeued.left:
                list_.append(node_dequeued.left )
            if node_dequeued.right:
                list_.append(node_dequeued.right )
        print()
        for node in list_:
            q.put(node )


def pre_order_iter(node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    stack: list = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data , end=',' )
            stack.append(n )
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    stack: list = []
    n = node
    while n or stack:
        while n:
            stack.append(n )
            n = n.left
        n = stack.pop()
        print(n.data , end=',' )
        n = n.right


def post_order_iter(node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n )
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left )
        if n.right:
            stack1.append(n.right )
        stack2.append(n )
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data , end=',' )


def prompt(s: str = "" , width=50 , char="*" ) -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s ) - 2 , 2 )
    return F'{left * char} {s} {(left + extra) * char}'
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('''Binary Tree Traversals'''))
    node = build_tree()
print(prompt('''Pre Order Traversal'''))
pre_order(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal'''))
in_order(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal'''))
post_order(node)
print(prompt() + '''\n''')
print(prompt('''Level Order Traversal'''))
level_order(node)
print(prompt() + '''\n''')
print(prompt('''Actual Level Order Traversal'''))
level_order_actual(node)
print('''*''' * 50 + '''\n''')
print(prompt('''Pre Order Traversal - Iteration Version'''))
pre_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal - Iteration Version'''))
in_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal - Iteration Version'''))
post_order_iter(node)
print(prompt())
| 38
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = ShapEImg2ImgPipeline
    params = ["""image"""]
    batch_params = ["""image"""]
    required_optional_params = [
        """num_images_per_prompt""",
        """num_inference_steps""",
        """generator""",
        """latents""",
        """guidance_scale""",
        """frame_size""",
        """output_type""",
        """return_dict""",
    ]
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size( self ):
        return 32

    @property
    def time_input_dim( self ):
        return 32

    @property
    def time_embed_dim( self ):
        return self.time_input_dim * 4

    @property
    def renderer_dim( self ):
        return 8

    @property
    def dummy_image_encoder( self ):
        torch.manual_seed(0 )
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
        model = CLIPVisionModel(config )
        return model

    @property
    def dummy_image_processor( self ):
        image_processor = CLIPImageProcessor(
            crop_size=224 , do_center_crop=True , do_normalize=True , do_resize=True , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
        return image_processor

    @property
    def dummy_prior( self ):
        torch.manual_seed(0 )
        model_kwargs = {
            'num_attention_heads': 2,
            'attention_head_dim': 16,
            'embedding_dim': self.time_input_dim,
            'num_embeddings': 32,
            'embedding_proj_dim': self.text_embedder_hidden_size,
            'time_embed_dim': self.time_embed_dim,
            'num_layers': 1,
            'clip_embed_dim': self.time_input_dim * 2,
            'additional_embeddings': 0,
            'time_embed_act_fn': 'gelu',
            'norm_in_type': 'layer',
            'embedding_proj_norm_type': 'layer',
            'encoder_hid_proj_type': None,
            'added_emb_type': None,
        }
        model = PriorTransformer(**model_kwargs )
        return model

    @property
    def dummy_renderer( self ):
        torch.manual_seed(0 )
        model_kwargs = {
            'param_shapes': (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            'd_latent': self.time_input_dim,
            'd_hidden': self.renderer_dim,
            'n_output': 12,
            'background': (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs )
        return model

    def get_dummy_components( self ):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule='exp' , num_train_timesteps=1_024 , prediction_type='sample' , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
        components = {
            'prior': prior,
            'image_encoder': image_encoder,
            'image_processor': image_processor,
            'renderer': renderer,
            'scheduler': scheduler,
        }
        return components

    def get_dummy_inputs( self , device , seed=0 ):
        input_image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'image': input_image,
            'generator': generator,
            'num_inference_steps': 1,
            'frame_size': 32,
            'output_type': 'np',
        }
        return inputs

    def test_shap_e( self ):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def test_inference_batch_consistent( self ):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )

    def test_inference_batch_single_identical( self ):
        test_max_difference = torch_device == 'cpu'
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , )

    def test_num_images_per_prompt( self ):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device )
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs , num_images_per_prompt=num_images_per_prompt )[0]
        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests( unittest.TestCase ):
    '''simple docstring'''
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img( self ):
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/shap_e/test_shap_e_img2img_out.npy' )
        pipe = ShapEImg2ImgPipeline.from_pretrained('openai/shap-e-img2img' )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        images = pipe(
            input_image , generator=generator , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images , expected_image )
| 38
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list ) -> bool:
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:] )


class StableDiffusionLatentUpscalePipelineFastTests( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    test_cpu_offload = True

    @property
    def dummy_image( self ):
        """simple docstring"""
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0 ) ).to(torch_device )
        return image

    def get_dummy_components( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        model = UNet2DConditionModel(
            act_fn='''gelu''', attention_head_dim=8, norm_num_groups=None, block_out_channels=[32, 32, 64, 64], time_cond_proj_dim=160, conv_in_kernel=1, conv_out_kernel=1, cross_attention_dim=32, down_block_types=(
                '''KDownBlock2D''',
                '''KCrossAttnDownBlock2D''',
                '''KCrossAttnDownBlock2D''',
                '''KCrossAttnDownBlock2D''',
            ), in_channels=8, mid_block_type=None, only_cross_attention=False, out_channels=5, resnet_time_scale_shift='''scale_shift''', time_embedding_type='''fourier''', timestep_post_act='''gelu''', up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D'''), )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64], in_channels=3, out_channels=3, down_block_types=[
                '''DownEncoderBlock2D''',
                '''DownEncoderBlock2D''',
                '''DownEncoderBlock2D''',
                '''DownEncoderBlock2D''',
            ], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, )
        scheduler = EulerDiscreteScheduler(prediction_type='''sample''' )
        text_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='''quick_gelu''', projection_dim=512, )
        text_encoder = CLIPTextModel(text_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        components = {
            '''unet''': model.eval(),
            '''vae''': vae.eval(),
            '''scheduler''': scheduler,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
        }
        return components

    def get_dummy_inputs( self, device, seed=0 ):
        """simple docstring"""
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': self.dummy_image.cpu(),
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs

    def test_inference( self ):
        """simple docstring"""
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 256, 256, 3) )
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff, 1E-3 )

    def test_attention_slicing_forward_pass( self ):
        """simple docstring"""
        super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )

    def test_cpu_offload_forward_pass( self ):
        """simple docstring"""
        super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )

    def test_dict_tuple_outputs_equivalent( self ):
        """simple docstring"""
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )

    def test_inference_batch_single_identical( self ):
        """simple docstring"""
        super().test_inference_batch_single_identical(expected_max_diff=7E-3 )

    def test_pt_np_pil_outputs_equivalent( self ):
        """simple docstring"""
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )

    def test_save_load_local( self ):
        """simple docstring"""
        super().test_save_load_local(expected_max_difference=3E-3 )

    def test_save_load_optional_components( self ):
        """simple docstring"""
        super().test_save_load_optional_components(expected_max_difference=3E-3 )

    def test_karras_schedulers_shape( self ):
        """simple docstring"""
        skip_schedulers = [
            '''DDIMScheduler''',
            '''DDPMScheduler''',
            '''PNDMScheduler''',
            '''HeunDiscreteScheduler''',
            '''EulerAncestralDiscreteScheduler''',
            '''KDPM2DiscreteScheduler''',
            '''KDPM2AncestralDiscreteScheduler''',
            '''DPMSolverSDEScheduler''',
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        inputs['''num_inference_steps'''] = 2
        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                continue
            scheduler_cls = getattr(diffusers, scheduler_enum.name )
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config )
            output = pipe(**inputs )[0]
            outputs.append(output )
        assert check_same_shape(outputs )


@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests( unittest.TestCase ):
    '''simple docstring'''
    def tearDown( self ):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16( self ):
        """simple docstring"""
        generator = torch.manual_seed(33 )
        pipe = StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''', torch_dtype=torch.float16 )
        pipe.to('''cuda''' )
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            '''stabilityai/sd-x2-latent-upscaler''', torch_dtype=torch.float16 )
        upscaler.to('''cuda''' )
        prompt = '''a photo of an astronaut high resolution, unreal engine, ultra realistic'''
        low_res_latents = pipe(prompt, generator=generator, output_type='''latent''' ).images
        image = upscaler(
            prompt=prompt, image=low_res_latents, num_inference_steps=20, guidance_scale=0, generator=generator, output_type='''np''', ).images[0]
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''' )
        assert np.abs((expected_image - image).mean() ) < 5E-2

    def test_latent_upscaler_fp16_image( self ):
        """simple docstring"""
        generator = torch.manual_seed(33 )
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            '''stabilityai/sd-x2-latent-upscaler''', torch_dtype=torch.float16 )
        upscaler.to('''cuda''' )
        prompt = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'''
        image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''' )
        upscaled_image = upscaler(
            prompt=prompt, image=image, num_inference_steps=20, guidance_scale=0, generator=generator, output_type='''np''', ).images[0]
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''' )
        assert np.abs((expected_image - upscaled_image).max() ) < 5E-2
| 201
|
def solution(limit: int = 1_000_000 ) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1 , limit ):
        for n in range(first_term , limit , first_term ):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x,y,z are positive integers
                    frequency[n] += 1  # so z>0 and a>d ,also 4d<a
    count = sum(1 for x in frequency[1:limit] if x == 10 )
    return count
if __name__ == "__main__":
print(F'''{solution() = }''')
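# Derivation sketch (assuming the Project Euler 135 setup of x > y > z in arithmetic
# progression): write x = a + d, y = a, z = a - d, so x**2 - y**2 - z**2 = n reduces to
# n = a * (4*d - a). With first_term = a and n ranging over multiples of a, the inner
# loop's common_difference = a + n / a equals 4*d, which explains the % 4 check, while
# z > 0 forces a > d and n > 0 forces a < 4*d, matching the two inequalities above.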
| 201
| 1
|
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name ):
    config = ASTConfig()
    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError('Model not supported' )
    repo_id = 'huggingface/label-files'
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = 'speech-commands-v2-id2label.json'
    else:
        config.num_labels = 527
        filename = 'audioset-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key(name ):
    if "module.v" in name:
        name = name.replace('module.v' , 'audio_spectrogram_transformer' )
    if "cls_token" in name:
        name = name.replace('cls_token' , 'embeddings.cls_token' )
    if "dist_token" in name:
        name = name.replace('dist_token' , 'embeddings.distillation_token' )
    if "pos_embed" in name:
        name = name.replace('pos_embed' , 'embeddings.position_embeddings' )
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
    # transformer blocks
    if "blocks" in name:
        name = name.replace('blocks' , 'encoder.layer' )
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense' )
    if "attn" in name:
        name = name.replace('attn' , 'attention.self' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before' )
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense' )
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace('audio_spectrogram_transformer.norm' , 'audio_spectrogram_transformer.layernorm' )
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace('module.mlp_head.0' , 'classifier.layernorm' )
    if "module.mlp_head.1" in name:
        name = name.replace('module.mlp_head.1' , 'classifier.dense' )
    return name
def convert_state_dict(orig_state_dict , config ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split('.' )
            layer_num = int(key_split[3] )
            dim = config.hidden_size
            # the fused qkv projection is split into separate query/key/value weights
            if "weight" in key:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def remove_keys(state_dict ):
    ignore_keys = [
        'module.v.head.weight',
        'module.v.head.bias',
        'module.v.head_dist.weight',
        'module.v.head_dist.bias',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original AST weights into the 🤗 Transformers structure."""
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }

    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()
    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )
        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
SCREAMING_SNAKE_CASE_ = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
SCREAMING_SNAKE_CASE_ = torch.tensor([-1.1_9_8_6, -7.0_9_0_3, -8.2_7_1_8] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
SCREAMING_SNAKE_CASE_ = torch.tensor([-2.6_1_2_8, -8.0_0_8_0, -9.4_3_4_4] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
SCREAMING_SNAKE_CASE_ = torch.tensor([-1.5_0_8_0, -7.4_5_3_4, -8.8_9_1_7] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
SCREAMING_SNAKE_CASE_ = torch.tensor([-0.5_0_5_0, -6.5_8_3_3, -8.0_8_4_3] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
SCREAMING_SNAKE_CASE_ = torch.tensor([-0.3_8_2_6, -7.0_3_3_6, -8.2_4_1_3] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
SCREAMING_SNAKE_CASE_ = torch.tensor([-1.2_1_1_3, -6.9_1_0_1, -8.3_4_7_0] )
elif model_name == "ast-finetuned-speech-commands-v2":
SCREAMING_SNAKE_CASE_ = torch.tensor([6.1_5_8_9, -8.0_5_6_6, -8.7_9_8_4] )
else:
raise ValueError('Unknown model name' )
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='ast-finetuned-audioset-10-10-0.4593',
type=str,
help='Name of the Audio Spectrogram Transformer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
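# Hypothetical invocation (the script filename and paths are illustrative):
#   python convert_audio_spectrogram_transformer_checkpoint.py \
#       --model_name ast-finetuned-audioset-10-10-0.4593 \
#       --pytorch_dump_folder_path ./ast-converted \
#       --push_to_hub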
| 210
|
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
SAMPLE_ROBERTA_CONFIG = get_tests_dir('fixtures/dummy-config.json')


class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec('transformers.models.auto'))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained('bert-base-uncased')
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model('roberta')
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, 'fake-roberta')
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, 'config.json'), 'w') as f:
                f.write(json.dumps({}))

            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register('custom', CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register('model', CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register('bert', BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, 'bert-base is not a local folder and is not a valid model identifier'
        ):
            _ = AutoConfig.from_pretrained('bert-base')

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'
        ):
            _ = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision='aaaaaa')

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            'hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.',
        ):
            _ = AutoConfig.from_pretrained('hf-internal-testing/no-config-test-repo')

    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model')
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model', trust_remote_code=False)

        config = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model', trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, 'NewModelConfig')

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, 'NewModelConfig')

    def test_new_dynamic_config_registration(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register('new-model', NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model')
            self.assertEqual(config.__class__.__name__, 'NewModelConfigLocal')
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model', trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, 'NewModelConfigLocal')
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model', trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, 'NewModelConfig')
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
| 210
| 1
|
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '''
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
25.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
50.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
75.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results["exact_match"], 1))
100.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]
>>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
33.3
'''
_CITATION = '''
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )
    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
| 104
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original FocalNet weights into the 🤗 Transformers structure."""
    # fmt: off
    model_name_to_url = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])
if model_name == "focalnet-tiny":
lowerCAmelCase_ = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
elif model_name == "focalnet-tiny-lrf":
lowerCAmelCase_ = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
elif model_name == "focalnet-small":
lowerCAmelCase_ = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
elif model_name == "focalnet-small-lrf":
lowerCAmelCase_ = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
elif model_name == "focalnet-base":
lowerCAmelCase_ = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
elif model_name == "focalnet-base-lrf":
lowerCAmelCase_ = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
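# Hypothetical invocation (the script filename and paths are illustrative):
#   python convert_focalnet_checkpoint.py \
#       --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny-converted \
#       --push_to_hub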
| 278
| 0
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/bart-base""": 1024,
"""facebook/bart-large""": 1024,
"""facebook/bart-large-mnli""": 1024,
"""facebook/bart-large-cnn""": 1024,
"""facebook/bart-large-xsum""": 1024,
"""yjernite/bart_eli5""": 1024,
}
@lru_cache()
def bytes_to_unicode():
    """Returns a mapping from utf-8 bytes to unicode strings, avoiding whitespace
    and control characters that would break the BPE vocabulary."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
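# Quick sanity sketch of the byte-to-unicode map built above (values follow from
# the construction; shown here only for illustration):
#   m = bytes_to_unicode()
#   assert len(m) == 256
#   assert m[ord("!")] == "!"   # printable bytes map to themselves
#   assert m[0] == chr(256)     # non-printable bytes get shifted code points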
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
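# For example, get_pairs(("h", "e", "l", "l", "o")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")} -- the set of adjacent symbol pairs.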
class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            bpe_merges = merges_handle.read().split('\n')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word
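    # A rough sketch of one merge step (the rank table is hypothetical): with
    # bpe_ranks = {("l", "o"): 0}, bpe("lol") first sees pairs {("l", "o"), ("o", "l")},
    # picks ("l", "o") as the lowest-ranked bigram and rewrites ("l", "o", "l") into
    # ("lo", "l"); no remaining pair has a rank, so the loop stops and "lo l" is returned.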
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8')
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']
        )

        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')

        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
| 356
|
"""simple docstring"""
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 181
| 0
|
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        """Returns a visual representation of the node and all its following nodes."""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f'''{temp.data}''')
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list):
    """Creates a Linked List from the elements of the given sequence
    (list/tuple) and returns the head of the Linked List.
    >>> make_linked_list([])
    Traceback (most recent call last):
        ...
    Exception: The Elements List is empty
    >>> make_linked_list([7])
    7
    >>> make_linked_list([7, 25])
    7->25
    """
    # if elements_list is empty
    if not elements_list:
        raise Exception('The Elements List is empty')

    # Set first element as Head
    head = Node(elements_list[0])
    current = head
    # Loop through elements from position 1
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node):
    """Prints the data of the linked list in reverse, using recursion."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print('Linked List:')
    print(linked_list)
    print('Elements in Reverse:')
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
| 21
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason='''ResNet does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='''ResNet does not support input and output embeddings''')
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['''basic''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image


@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''tf''')

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])

        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 33
| 0
|
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f'model.framelevel_feature_extractor.module.{i}.kernel.weight'
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f'model.framelevel_feature_extractor.module.{i}.kernel.bias']

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Copy/paste/tweak an S3PRL downstream checkpoint into the Transformers design."""
    checkpoint = torch.load(checkpoint_path, map_location="""cpu""")
    downstream_dict = checkpoint['Downstream']

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False)

    arch = hf_config.architectures[0]
    if arch.endswith("""ForSequenceClassification"""):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("""ForAudioFrameClassification"""):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("""ForXVector"""):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f'S3PRL weights conversion is not supported for {arch}')

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint['Featurizer']['weights']

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
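# Hypothetical invocation (the script filename, model name and paths are illustrative):
#   python convert_s3prl_checkpoint.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_downstream.ckpt \
#       --model_dump_path ./converted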
| 362
|
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """
    Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)
    >>> resistor_parallel([2, 2])
    1.0
    >>> resistor_parallel([2, -1])
    Traceback (most recent call last):
        ...
    ValueError: Resistor at index 1 has a negative or zero value!
    """
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f'Resistor at index {index} has a negative or zero value!'
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """
    Req = R1 + R2 + ... + Rn
    >>> resistor_series([5, 10, 15])
    30.0
    >>> resistor_series([5, -10])
    Traceback (most recent call last):
        ...
    ValueError: Resistor at index 1 has a negative value!
    """
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f'Resistor at index {index} has a negative value!'
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82
| 0
|
def min_path_sum(grid: list) -> int:
    """
    Find the path from top left to bottom right of an array of numbers
    with the lowest possible sum and return the sum along this path.
    >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    """
    if not grid or not grid[0]:
        raise TypeError("""The grid does not contain the appropriate information""")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    """
    >>> fill_row([2, 2, 2], [1, 2, 3])
    [3, 4, 5]
    """
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
| 38
|
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """Build the Quantum Fourier Transform circuit on ``number_of_qubits`` qubits,
    run it on the qasm simulator and return the measurement counts."""
    if isinstance(number_of_qubits, str):
        raise TypeError("""number of qubits must be an integer.""")
    if number_of_qubits <= 0:
        raise ValueError("""number of qubits must be > 0.""")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("""number of qubits must be exact integer.""")
    if number_of_qubits > 10:
        raise ValueError("""number of qubits too large to simulate(>10).""")

    qr = QuantumRegister(number_of_qubits, """qr""")
    cr = ClassicalRegister(number_of_qubits, """cr""")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            # controlled phase rotation between qubit j and the current target qubit
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("""qasm_simulator""")
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)
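# Example of the returned counts for the default 3 qubits (values fluctuate run to
# run, since the circuit maps |000> to a uniform superposition and the simulator
# samples 10000 shots over the 8 bitstrings):
#   {'000': 1252, '001': 1265, '010': 1243, '011': 1270,
#    '100': 1224, '101': 1233, '110': 1254, '111': 1259}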
if __name__ == "__main__":
print(
F'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
| 38
| 1
|
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class VisionTextDualEncoderProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'AutoImageProcessor'
    tokenizer_class = 'AutoTokenizer'

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
| 136
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
    'configuration_audio_spectrogram_transformer': [
        'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'ASTConfig',
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_audio_spectrogram_transformer'] = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_audio_spectrogram_transformer'] = ['ASTFeatureExtractor']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 136
| 1
|
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle the list in place by repeatedly swapping two random positions."""
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["""python""", """says""", """hello""", """!"""]
print("""Fisher-Yates Shuffle:""")
print("""List""", integers, strings)
print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 210
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/vivit-b-16x2-kinetics400""": (
"""https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"""
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    model_type = '''vivit'''

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias

        super().__init__(**kwargs)
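# Quick sanity sketch: default construction mirrors the google/vivit-b-16x2-kinetics400
# architecture (values follow the defaults above):
#   config = VivitConfig()
#   assert (config.hidden_size, config.num_frames, config.tubelet_size) == (768, 32, [2, 16, 16])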
| 210
| 1
|
def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True


def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
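# Illustrative check (assumes the usual singly linked list node with .val/.next):
#   class ListNode:
#       def __init__(self, val):
#           self.val, self.next = val, None
#   head = ListNode(1); head.next = ListNode(2)
#   head.next.next = ListNode(2); head.next.next.next = ListNode(1)
#   assert is_palindrome_stack(head)  # 1 -> 2 -> 2 -> 1 reads the same both ways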
| 55
|
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'''
    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
    class_ = 'My(6px) Pos(r) smartphone_Mt(6px)'
    return soup.find('div', class_=class_).find('span').text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
'''simple docstring'''
def add(first: int, second: int) -> int:
    """Add two integers using only bitwise operators."""
    while second != 0:
        c = first & second
        first ^= second
        second = c << 1
    return first
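
# Hedged side note (not part of the original snippet): because Python integers
# are unbounded, the loop above never terminates when `second` is negative.
# A common fix is to emulate 32-bit two's-complement arithmetic with a mask:
def add_masked(first: int, second: int) -> int:
    """32-bit variant of `add` that also terminates for negative inputs."""
    mask = 0xFFFFFFFF
    while second & mask:
        first, second = (first ^ second) & mask, ((first & second) << 1) & mask
    first &= mask
    # re-interpret the 32-bit pattern as a signed value
    return first if first < 0x80000000 else ~(first ^ mask)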
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
print(f"""{add(first, second) = }""")
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/bart-base''': 1_0_2_4,
'''facebook/bart-large''': 1_0_2_4,
'''facebook/bart-large-mnli''': 1_0_2_4,
'''facebook/bart-large-cnn''': 1_0_2_4,
'''facebook/bart-large-xsum''': 1_0_2_4,
'''yjernite/bart_eli5''': 1_0_2_4,
}
@lru_cache()
def bytes_to_unicode() -> dict:
    """
    Returns a mapping from utf-8 byte values to printable unicode strings,
    avoiding whitespace/control characters that BPE code cannot handle.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word: tuple) -> set:
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
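
# Quick sanity sketch (illustrative, not part of the original module):
#   bytes_to_unicode() has exactly 256 entries; e.g. the space byte (32) maps
#   to "Ġ" (chr(256 + 32)), which is why GPT-2/BART vocabularies are full of
#   "Ġ"-prefixed tokens.
#   get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}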
class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : Optional[int] , _A : Optional[int] , _A : List[Any] , _A : int="replace" , _A : List[Any]="<s>" , _A : List[Any]="</s>" , _A : List[Any]="</s>" , _A : Optional[int]="<s>" , _A : List[str]="<unk>" , _A : List[str]="<pad>" , _A : Union[str, Any]="<mask>" , _A : Any=False , **_A : Dict , ):
'''simple docstring'''
UpperCAmelCase__ : Dict = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else bos_token
UpperCAmelCase__ : Any = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else eos_token
UpperCAmelCase__ : Optional[int] = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else sep_token
UpperCAmelCase__ : Tuple = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else cls_token
UpperCAmelCase__ : int = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else unk_token
UpperCAmelCase__ : Optional[Any] = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase__ : Optional[int] = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token
super().__init__(
errors=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , **_A , )
with open(_A , encoding='''utf-8''' ) as vocab_handle:
UpperCAmelCase__ : Optional[Any] = json.load(_A )
UpperCAmelCase__ : Any = {v: k for k, v in self.encoder.items()}
UpperCAmelCase__ : List[str] = errors # how to handle errors in decoding
UpperCAmelCase__ : str = bytes_to_unicode()
UpperCAmelCase__ : Dict = {v: k for k, v in self.byte_encoder.items()}
with open(_A , encoding='''utf-8''' ) as merges_handle:
UpperCAmelCase__ : str = merges_handle.read().split('''\n''' )[1:-1]
UpperCAmelCase__ : Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges]
UpperCAmelCase__ : Optional[Any] = dict(zip(_A , range(len(_A ) ) ) )
UpperCAmelCase__ : Optional[int] = {}
UpperCAmelCase__ : int = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCAmelCase__ : List[Any] = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
def lowercase_ ( self : int ):
'''simple docstring'''
return len(self.encoder )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase_ ( self : List[Any] , _A : Tuple ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
UpperCAmelCase__ : Optional[Any] = tuple(_A )
UpperCAmelCase__ : Dict = get_pairs(_A )
if not pairs:
return token
while True:
UpperCAmelCase__ : Optional[Any] = min(_A , key=lambda _A : self.bpe_ranks.get(_A , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase__ , UpperCAmelCase__ : str = bigram
UpperCAmelCase__ : int = []
UpperCAmelCase__ : Tuple = 0
while i < len(_A ):
try:
UpperCAmelCase__ : Optional[int] = word.index(_A , _A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase__ : Tuple = j
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase__ : Optional[Any] = tuple(_A )
UpperCAmelCase__ : List[Any] = new_word
if len(_A ) == 1:
break
else:
UpperCAmelCase__ : Union[str, Any] = get_pairs(_A )
UpperCAmelCase__ : Optional[Any] = ''' '''.join(_A )
UpperCAmelCase__ : List[Any] = word
return word
def lowercase_ ( self : str , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = []
for token in re.findall(self.pat , _A ):
UpperCAmelCase__ : str = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_A ).split(''' ''' ) )
return bpe_tokens
def lowercase_ ( self : List[str] , _A : Any ):
'''simple docstring'''
return self.encoder.get(_A , self.encoder.get(self.unk_token ) )
def lowercase_ ( self : int , _A : List[str] ):
'''simple docstring'''
return self.decoder.get(_A )
def lowercase_ ( self : Tuple , _A : Any ):
'''simple docstring'''
UpperCAmelCase__ : Any = ''''''.join(_A )
UpperCAmelCase__ : List[str] = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def lowercase_ ( self : int , _A : str , _A : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase__ : Tuple = os.path.join(
_A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase__ : Any = os.path.join(
_A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(_A , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_A , ensure_ascii=_A ) + '''\n''' )
        index = 0
with open(_A , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
                    index = token_index
writer.write(''' '''.join(_A ) + '''\n''' )
index += 1
return vocab_file, merge_file
def lowercase_ ( self : str , _A : List[int] , _A : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase__ : List[str] = [self.cls_token_id]
UpperCAmelCase__ : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase_ ( self : Optional[int] , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A )
if token_ids_a is None:
return [1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) + [1]
def lowercase_ ( self : Dict , _A : List[int] , _A : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = [self.sep_token_id]
UpperCAmelCase__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase_ ( self : Optional[Any] , _A : Any , _A : Dict=False , **_A : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_A ) > 0 and not text[0].isspace()):
UpperCAmelCase__ : Tuple = ''' ''' + text
return (text, kwargs)
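
# Illustrative sketch of the pre-tokenisation regex above (self.pat):
#   re.findall(pat, "Hello world's")  ->  ['Hello', ' world', "'s"]
# Each piece is then byte-mapped via bytes_to_unicode() before BPE merges run.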
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/mbart-large-50-one-to-many-mmt": (
"https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/mbart-large-50-one-to-many-mmt": 1_0_2_4,
}
# fmt: off
__UpperCAmelCase :List[Any] = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
class MBart50Tokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self : Dict , snake_case : Dict , snake_case : List[Any]=None , snake_case : Tuple=None , snake_case : int="</s>" , snake_case : Tuple="</s>" , snake_case : Any="<s>" , snake_case : Tuple="<unk>" , snake_case : Any="<pad>" , snake_case : str="<mask>" , snake_case : Optional[Dict[str, Any]] = None , **snake_case : Optional[int] , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
__UpperCAmelCase : str = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else mask_token
__UpperCAmelCase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
__UpperCAmelCase : List[Any] = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=snake_case , tgt_lang=snake_case , eos_token=snake_case , unk_token=snake_case , sep_token=snake_case , cls_token=snake_case , pad_token=snake_case , mask_token=snake_case , sp_model_kwargs=self.sp_model_kwargs , **snake_case , )
__UpperCAmelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case ) )
__UpperCAmelCase : Optional[int] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
__UpperCAmelCase : Tuple = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__UpperCAmelCase : List[Any] = 1
__UpperCAmelCase : str = len(self.sp_model )
__UpperCAmelCase : Dict = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(snake_case )
}
__UpperCAmelCase : Optional[Any] = {v: k for k, v in self.lang_code_to_id.items()}
__UpperCAmelCase : List[Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
__UpperCAmelCase : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
__UpperCAmelCase : Dict = src_lang if src_lang is not None else '''en_XX'''
__UpperCAmelCase : str = self.lang_code_to_id[self._src_lang]
__UpperCAmelCase : int = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def lowerCamelCase__ ( self : str ) -> int:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def lowerCamelCase__ ( self : Optional[Any] ) -> str:
return self._src_lang
@src_lang.setter
def lowerCamelCase__ ( self : Optional[Any] , snake_case : str ) -> None:
__UpperCAmelCase : str = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Optional[Any] ) -> Dict:
__UpperCAmelCase : Tuple = self.__dict__.copy()
__UpperCAmelCase : Union[str, Any] = None
return state
def __setstate__( self : Dict , snake_case : Dict ) -> None:
__UpperCAmelCase : Dict = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__UpperCAmelCase : Tuple = {}
__UpperCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCamelCase__ ( self : int ) -> Dict:
__UpperCAmelCase : List[str] = {self.convert_ids_to_tokens(snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase__ ( self : Union[str, Any] , snake_case : str ) -> List[str]:
return self.sp_model.encode(snake_case , out_type=snake_case )
def lowerCamelCase__ ( self : List[Any] , snake_case : str ) -> int:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__UpperCAmelCase : Union[str, Any] = self.sp_model.PieceToId(snake_case )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCamelCase__ ( self : Optional[Any] , snake_case : int ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
def lowerCamelCase__ ( self : List[str] , snake_case : str , snake_case : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(snake_case ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__UpperCAmelCase : List[str] = os.path.join(
snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case , '''wb''' ) as fi:
__UpperCAmelCase : str = self.sp_model.serialized_model_proto()
fi.write(snake_case )
return (out_vocab_file,)
def lowerCamelCase__ ( self : List[str] , snake_case : List[int] , snake_case : Optional[List[int]] = None , snake_case : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case )
__UpperCAmelCase : List[Any] = [1] * len(self.prefix_tokens )
__UpperCAmelCase : str = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(snake_case )) + suffix_ones
return prefix_ones + ([0] * len(snake_case )) + ([0] * len(snake_case )) + suffix_ones
def lowerCamelCase__ ( self : Tuple , snake_case : List[int] , snake_case : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCamelCase__ ( self : List[str] , snake_case : Optional[Any] , snake_case : str , snake_case : Optional[str] , snake_case : Optional[str] , **snake_case : int ) -> str:
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
__UpperCAmelCase : Tuple = src_lang
__UpperCAmelCase : Any = self(snake_case , add_special_tokens=snake_case , return_tensors=snake_case , **snake_case )
__UpperCAmelCase : Tuple = self.convert_tokens_to_ids(snake_case )
__UpperCAmelCase : List[Any] = tgt_lang_id
return inputs
def lowerCamelCase__ ( self : Union[str, Any] , snake_case : List[str] , snake_case : str = "en_XX" , snake_case : Optional[List[str]] = None , snake_case : str = "ro_RO" , **snake_case : int , ) -> BatchEncoding:
__UpperCAmelCase : List[Any] = src_lang
__UpperCAmelCase : str = tgt_lang
return super().prepare_seqaseq_batch(snake_case , snake_case , **snake_case )
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
return self.set_src_lang_special_tokens(self.src_lang )
def lowerCamelCase__ ( self : Any ) -> str:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCamelCase__ ( self : Union[str, Any] , snake_case : str ) -> None:
__UpperCAmelCase : Optional[Any] = self.lang_code_to_id[src_lang]
__UpperCAmelCase : List[Any] = [self.cur_lang_code_id]
__UpperCAmelCase : Any = [self.eos_token_id]
def lowerCamelCase__ ( self : int , snake_case : str ) -> None:
__UpperCAmelCase : Union[str, Any] = self.lang_code_to_id[tgt_lang]
__UpperCAmelCase : str = [self.cur_lang_code_id]
__UpperCAmelCase : Any = [self.eos_token_id]
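
# Sketch of the fairseq/spm id alignment documented in __init__ above: a
# sentencepiece id is shifted by fairseq_offset (1), so spm id 3 (",") becomes
# fairseq id 4, matching the alignment table; spm id 0 is <unk>, which is why
# the token-to-id conversion falls back to unk_token_id whenever sp_model
# returns 0.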
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
__UpperCAmelCase :Tuple = "Create a default config file for Accelerate with only a few flags set."
def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    """
    Creates and saves a basic cluster config to be used on a local machine
    with potentially multiple GPUs.
    """
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path
def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
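
# Minimal usage sketch (assumptions flagged inline): write_basic_config("bf16")
# writes a LOCAL_MACHINE ClusterConfig to the default json location and returns
# its path, or returns False when a config file already exists there.
# write_basic_config("bf16", save_location="/tmp/accelerate.json")  # hypothetical path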
def text_justification(word: str, max_width: int) -> list:
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
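
# Worked example (matches the algorithm above):
#   text_justification("This is an example of text justification.", 16)
#   -> ['This    is    an', 'example  of text', 'justification.  ']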
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    """Double linked list node built specifically for the LRU cache below."""

    def __init__(self, key, val):
        self.key = key
        self.val = val
        self.next = None
        self.prev = None

    def __repr__(self):
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    def __init__(self):
        self.head = DoubleLinkedListNode(None, None)
        self.rear = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self):
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node):
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node):
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    decorator_function_to_instance_map: dict = {}

    def __init__(self, capacity):
        self.list = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache = {}

    def __repr__(self):
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key):
        return key in self.cache

    def get(self, key):
        if key in self.cache:
            self.hits += 1
            value_node = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key, value):
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128):
        def cache_decorator_inner(func) -> Callable[..., U]:
            def cache_decorator_wrapper(*args) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> "LRUCache[T, U]":
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
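
# Usage sketch: the decorator memoises on the first positional argument only
# (args[0]), so it suits single-argument functions:
#
#     @LRUCache.decorator(100)
#     def fib(num: int) -> int:
#         return 1 if num in (1, 2) else fib(num - 1) + fib(num - 2)
#
#     fib(30)
#     print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, ...)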
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
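
# e.g. create_token_type_ids_from_sequences([5], [6]) -> [0, 0, 0, 1, 1]
# (CLS + sequence A + SEP get type 0; sequence B + SEP get type 1)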
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
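
# Hedged reading of the adapter knobs above (an interpretation of the X-MOD
# design, not spelled out in this file): with the defaults, each adapter
# bottleneck is hidden_size / adapter_reduction_factor = 768 / 2 = 384 units wide.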
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
'''simple docstring'''
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
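
# Example invocation (hypothetical local paths and script name, for
# illustration only):
#   python convert_flava_checkpoint.py \
#       --checkpoint_path ./flava.pt --codebook_path ./dalle.pt \
#       --pytorch_dump_folder_path ./flava-hf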
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)
OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
parser.add_argument("""data_file""" , metavar="""data.json""" , help="""Input data JSON file.""" )
parser.add_argument("""pred_file""" , metavar="""pred.json""" , help="""Model predictions.""" )
parser.add_argument(
"""--out-file""" , """-o""" , metavar="""eval.json""" , help="""Write accuracy metrics to file (default is stdout).""" )
parser.add_argument(
"""--na-prob-file""" , """-n""" , metavar="""na_prob.json""" , help="""Model estimates of probability of no answer.""" )
parser.add_argument(
"""--na-prob-thresh""" , """-t""" , type=__lowerCAmelCase , default=1.0 , help="""Predict \"\" if no-answer probability exceeds this (default = 1.0).""" , )
parser.add_argument(
"""--out-image-dir""" , """-p""" , metavar="""out_images""" , default=__lowerCAmelCase , help="""Save precision-recall curves to directory.""" )
parser.add_argument("""--verbose""" , """-v""" , action="""store_true""" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_fa(a_gold, a_pred):
    """Token-level F1 between a gold answer and a prediction."""
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
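
# Worked example: gold "brown cat sat" vs. prediction "cat sat" ->
#   common tokens = {cat, sat}, precision = 2/2 = 1.0, recall = 2/3,
#   F1 = 2 * (1.0 * 2/3) / (1.0 + 2/3) = 0.8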
def get_raw_scores(dataset, preds):
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                fa_scores[qid] = max(compute_fa(a, a_pred) for a in gold_answers)
    return exact_scores, fa_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, fa_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(fa_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(fa_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )
def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_fa = make_precision_recall_eval(
        fa_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_fa, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
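
# Intuition sketch: cur_score starts at the count of correctly-handled
# no-answer questions when everything is predicted as no-answer; walking qids
# in order of increasing no-answer probability and "answering" each one adds
# its EM/F1 score (or a -1/0 penalty), so the running maximum marks the
# probability threshold that maximises the overall score.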
def find_all_best_thresh(main_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_fa, fa_thresh = find_best_thresh(preds, fa_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_fa
    main_eval["best_f1_thresh"] = fa_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, fa_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    fa_thresh = apply_no_ans_threshold(fa_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, fa_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
main()
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
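# Example (hypothetical key): for
#   layer = "encoder/layers_0/mlp/wi/kernel/kvstore/path"
# this returns curr_real_layer_name = "encoder/layers_0/mlp/wi/kernel",
# split_layer = [("kvstore", "path")], and a content string that points into
# the tensorstore directory under switch_checkpoint_path.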
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        # assumption: expert blocks use the "mlp." -> "ffn." prefix mapping from
        # the small Switch Transformers conversion script imported above
        new_current_block[k.replace("mlp.", "ffn.")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin"
        )
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
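# The index written above follows the standard HF sharded-checkpoint layout,
# e.g. (illustrative values only):
#   {"metadata": {"total_size": 123456},
#    "weight_map": {"encoder/block/0/weight": "pytorch_model-00001-of-00002.bin", ...}}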
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
)
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
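# _import_structure maps each submodule to the public names it defines; the
# _LazyModule installed at the bottom of this file uses it to defer the heavy
# torch import until one of these names is first accessed.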
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
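# Minimal usage sketch of the public API re-exported above (illustrative only):
#     accelerator = Accelerator()
#     model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#     ...
#     accelerator.backward(loss)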
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 256047
RO_CODE = 256145
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
def snake_case ( self : Tuple )-> Union[str, Any]:
lowerCamelCase__ : Optional[int] =(self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-nllb''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCamelCase__ : List[str] =self.rust_tokenizer_class.from_pretrained(lowerCamelCase, **lowerCamelCase )
lowerCamelCase__ : Any =self.tokenizer_class.from_pretrained(lowerCamelCase, **lowerCamelCase )
lowerCamelCase__ : Any =tempfile.mkdtemp()
lowerCamelCase__ : Dict =tokenizer_r.save_pretrained(lowerCamelCase )
lowerCamelCase__ : str =tokenizer_p.save_pretrained(lowerCamelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
lowerCamelCase__ : List[str] =tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(lowerCamelCase, lowerCamelCase )
# Checks everything loads correctly in the same way
lowerCamelCase__ : Tuple =tokenizer_r.from_pretrained(lowerCamelCase )
lowerCamelCase__ : Optional[int] =tokenizer_p.from_pretrained(lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )
shutil.rmtree(lowerCamelCase )
# Save tokenizer rust, legacy_format=True
lowerCamelCase__ : Optional[int] =tempfile.mkdtemp()
lowerCamelCase__ : Optional[int] =tokenizer_r.save_pretrained(lowerCamelCase, legacy_format=lowerCamelCase )
lowerCamelCase__ : Optional[Any] =tokenizer_p.save_pretrained(lowerCamelCase )
# Checks it save with the same files
self.assertSequenceEqual(lowerCamelCase, lowerCamelCase )
# Checks everything loads correctly in the same way
lowerCamelCase__ : Dict =tokenizer_r.from_pretrained(lowerCamelCase )
lowerCamelCase__ : str =tokenizer_p.from_pretrained(lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )
shutil.rmtree(lowerCamelCase )
# Save tokenizer rust, legacy_format=False
lowerCamelCase__ : Optional[int] =tempfile.mkdtemp()
lowerCamelCase__ : Optional[Any] =tokenizer_r.save_pretrained(lowerCamelCase, legacy_format=lowerCamelCase )
lowerCamelCase__ : Union[str, Any] =tokenizer_p.save_pretrained(lowerCamelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCamelCase__ : Any =tokenizer_r.from_pretrained(lowerCamelCase )
lowerCamelCase__ : Union[str, Any] =tokenizer_p.from_pretrained(lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase, lowerCamelCase ) )
shutil.rmtree(lowerCamelCase )
@require_torch
def snake_case ( self : Tuple )-> int:
if not self.test_seqaseq:
return
lowerCamelCase__ : Tuple =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Longer text that will definitely require truncation.
lowerCamelCase__ : Optional[int] =[
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'''
''' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'''
''' will only worsen the violence and misery for millions of people.''',
]
lowerCamelCase__ : Optional[Any] =[
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'''
''' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'''
''' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
try:
lowerCamelCase__ : Dict =tokenizer.prepare_seqaseq_batch(
src_texts=lowerCamelCase, tgt_texts=lowerCamelCase, max_length=3, max_target_length=10, return_tensors='''pt''', src_lang='''eng_Latn''', tgt_lang='''ron_Latn''', )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1], 3 )
self.assertEqual(batch.labels.shape[1], 10 )
# max_target_length will default to max_length if not specified
lowerCamelCase__ : Union[str, Any] =tokenizer.prepare_seqaseq_batch(
lowerCamelCase, tgt_texts=lowerCamelCase, max_length=3, return_tensors='''pt''' )
self.assertEqual(batch.input_ids.shape[1], 3 )
self.assertEqual(batch.labels.shape[1], 3 )
lowerCamelCase__ : Any =tokenizer.prepare_seqaseq_batch(
src_texts=lowerCamelCase, max_length=3, max_target_length=10, return_tensors='''pt''' )
self.assertEqual(batch_encoder_only.input_ids.shape[1], 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3 )
self.assertNotIn('''decoder_input_ids''', lowerCamelCase )
@unittest.skip('''Unfortunately way too slow to build a BPE with SentencePiece.''' )
def snake_case ( self : Tuple )-> Union[str, Any]:
pass
def snake_case ( self : Any )-> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCamelCase__ : List[str] =[AddedToken('''<special>''', lstrip=lowerCamelCase )]
lowerCamelCase__ : int =self.rust_tokenizer_class.from_pretrained(
lowerCamelCase, additional_special_tokens=lowerCamelCase, **lowerCamelCase )
lowerCamelCase__ : List[Any] =tokenizer_r.encode('''Hey this is a <special> token''' )
lowerCamelCase__ : str =tokenizer_r.encode('''<special>''', add_special_tokens=lowerCamelCase )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
lowerCamelCase__ : Tuple =self.rust_tokenizer_class.from_pretrained(
lowerCamelCase, additional_special_tokens=lowerCamelCase, **lowerCamelCase, )
lowerCamelCase__ : Optional[Any] =self.tokenizer_class.from_pretrained(
lowerCamelCase, additional_special_tokens=lowerCamelCase, **lowerCamelCase )
lowerCamelCase__ : str =tokenizer_p.encode('''Hey this is a <special> token''' )
lowerCamelCase__ : List[Any] =tokenizer_cr.encode('''Hey this is a <special> token''' )
self.assertEqual(lowerCamelCase, lowerCamelCase )
self.assertEqual(lowerCamelCase, lowerCamelCase )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is"
        " that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen"
        " the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [
        256047,
        16297,
        134408,
        8165,
        248066,
        14734,
        950,
        1135,
        105721,
        3573,
        83,
        27352,
        108,
        49486,
        2,
    ]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"], 256001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"], 256002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"], 256057)
    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-1], 2)
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [256203, 3])
    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = NllbTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.lang_code_to_id["ron_Latn"]
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 15), batch.input_ids.shape)
        self.assertEqual((2, 15), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(RO_CODE, batch.decoder_input_ids[0, 0])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(
            labels,
            self.tokenizer.pad_token_id,
            decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang],
        )
        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
@require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[256047, 70, 7356, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 256057,
            },
        )
@require_torch
    def test_legacy_behaviour(self):
        self.tokenizer.legacy_behaviour = True
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047]
        )

        self.tokenizer.legacy_behaviour = False
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2]
        )
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)


def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))
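# Example: with a 12-layer teacher, pick_layers_to_copy(n_student=4, n_teacher=12)
# returns [0, 4, 8, 11] per the LAYERS_TO_COPY table above (first and last
# layers are always kept).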
def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    """Used for the --supervise_forward kwarg: which teacher layer matches each student layer."""
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
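# Example: get_layers_to_supervise(n_student=3, n_teacher=12) returns [3, 7, 11],
# i.e. student layer i is supervised against teacher layer LAYERS_TO_SUPERVISE[12][3][i].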
def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    """Make a student by copying alternating layers of a teacher seq2seq model."""
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher keys.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    "Generates a tuple of dummy DataLoaders to test with"

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    "Trains for `num_epochs`"
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    "Simple model to do y=mx+b"

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
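# The model is deliberately a single scalar pair (a, b) fitting y = a*x + b, so
# the checkpointing tests below can compare exact parameter values before and
# after save_state/load_state round-trips.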
class CheckpointTest(unittest.TestCase):
def lowerCamelCase__( self :Tuple ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
a__ = DummyModel()
a__ = torch.optim.Adam(params=model.parameters() ,lr=1E-3 )
a__ , a__ = dummy_dataloaders()
a__ = ProjectConfiguration(total_limit=1 ,project_dir=__snake_case ,automatic_checkpoint_naming=__snake_case )
# Train baseline
a__ = Accelerator(project_config=__snake_case )
a__ , a__ , a__ , a__ = accelerator.prepare(
__snake_case ,__snake_case ,__snake_case ,__snake_case )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) ,1 )
def lowerCamelCase__( self :List[Any] ) -> str:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
a__ = DummyModel()
a__ = torch.optim.Adam(params=model.parameters() ,lr=1E-3 )
a__ , a__ = dummy_dataloaders()
# Train baseline
a__ = Accelerator()
a__ , a__ , a__ , a__ = accelerator.prepare(
__snake_case ,__snake_case ,__snake_case ,__snake_case )
# Save initial
a__ = os.path.join(__snake_case ,'initial' )
accelerator.save_state(__snake_case )
((a__) , (a__)) = model.a.item(), model.b.item()
a__ = optimizer.state_dict()
a__ = train(3 ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
((a__) , (a__)) = model.a.item(), model.b.item()
a__ = optimizer.state_dict()
# Train partially
set_seed(42 )
a__ = DummyModel()
a__ = torch.optim.Adam(params=model.parameters() ,lr=1E-3 )
a__ , a__ = dummy_dataloaders()
a__ = Accelerator()
a__ , a__ , a__ , a__ = accelerator.prepare(
__snake_case ,__snake_case ,__snake_case ,__snake_case )
accelerator.load_state(__snake_case )
((a__) , (a__)) = model.a.item(), model.b.item()
a__ = optimizer.state_dict()
self.assertEqual(__snake_case ,__snake_case )
self.assertEqual(__snake_case ,__snake_case )
self.assertEqual(__snake_case ,__snake_case )
a__ = train(2 ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
# Save everything
a__ = os.path.join(__snake_case ,'checkpoint' )
accelerator.save_state(__snake_case )
# Load everything back in and make sure all states work
accelerator.load_state(__snake_case )
test_rands += train(1 ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
((a__) , (a__)) = model.a.item(), model.b.item()
a__ = optimizer.state_dict()
self.assertEqual(__snake_case ,__snake_case )
self.assertEqual(__snake_case ,__snake_case )
self.assertEqual(__snake_case ,__snake_case )
self.assertEqual(__snake_case ,__snake_case )
def lowerCamelCase__( self :str ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
a__ = DummyModel()
a__ = torch.optim.Adam(params=model.parameters() ,lr=1E-3 )
a__ , a__ = dummy_dataloaders()
a__ = ProjectConfiguration(automatic_checkpoint_naming=__snake_case )
# Train baseline
a__ = Accelerator(project_dir=__snake_case ,project_config=__snake_case )
a__ , a__ , a__ , a__ = accelerator.prepare(
__snake_case ,__snake_case ,__snake_case ,__snake_case )
# Save initial
accelerator.save_state()
((a__) , (a__)) = model.a.item(), model.b.item()
a__ = optimizer.state_dict()
a__ = train(3 ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
((a__) , (a__)) = model.a.item(), model.b.item()
a__ = optimizer.state_dict()
# Train partially
set_seed(42 )
a__ = DummyModel()
a__ = torch.optim.Adam(params=model.parameters() ,lr=1E-3 )
a__ , a__ = dummy_dataloaders()
a__ = ProjectConfiguration(iteration=1 ,automatic_checkpoint_naming=__snake_case )
a__ = Accelerator(project_dir=__snake_case ,project_config=__snake_case )
a__ , a__ , a__ , a__ = accelerator.prepare(
__snake_case ,__snake_case ,__snake_case ,__snake_case )
accelerator.load_state(os.path.join(__snake_case ,'checkpoints' ,'checkpoint_0' ) )
((a__) , (a__)) = model.a.item(), model.b.item()
a__ = optimizer.state_dict()
self.assertEqual(__snake_case ,__snake_case )
self.assertEqual(__snake_case ,__snake_case )
self.assertEqual(__snake_case ,__snake_case )
a__ = train(2 ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(__snake_case ,'checkpoints' ,'checkpoint_1' ) )
test_rands += train(1 ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
((a__) , (a__)) = model.a.item(), model.b.item()
a__ = optimizer.state_dict()
self.assertEqual(__snake_case ,__snake_case )
self.assertEqual(__snake_case ,__snake_case )
self.assertEqual(__snake_case ,__snake_case )
self.assertEqual(__snake_case ,__snake_case )
def lowerCamelCase__( self :Union[str, Any] ) -> List[str]:
a__ = torch.tensor([1, 2, 3] )
a__ = torch.tensor([2, 3, 4] )
a__ = DummyModel()
a__ = torch.optim.Adam(net.parameters() )
a__ = Accelerator()
with self.assertRaises(__snake_case ) as ve:
accelerator.register_for_checkpointing(__snake_case ,__snake_case ,__snake_case ,__snake_case )
a__ = str(ve.exception )
self.assertTrue('Item at index 0' in message )
self.assertTrue('Item at index 1' in message )
self.assertFalse('Item at index 2' in message )
self.assertFalse('Item at index 3' in message )
def lowerCamelCase__( self :List[Any] ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
a__ = DummyModel()
a__ = torch.optim.Adam(params=model.parameters() ,lr=1E-3 )
a__ = torch.optim.lr_scheduler.StepLR(__snake_case ,step_size=1 ,gamma=0.99 )
a__ , a__ = dummy_dataloaders()
a__ = ProjectConfiguration(automatic_checkpoint_naming=__snake_case )
# Train baseline
a__ = Accelerator(project_dir=__snake_case ,project_config=__snake_case )
a__ , a__ , a__ , a__ , a__ = accelerator.prepare(
__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
# Save initial
accelerator.save_state()
a__ = scheduler.state_dict()
train(3 ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
self.assertNotEqual(__snake_case ,scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(__snake_case ,'checkpoints' ,'checkpoint_0' ) )
self.assertEqual(__snake_case ,scheduler.state_dict() )
def lowerCamelCase__( self :Optional[int] ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
a__ = DummyModel()
a__ = ProjectConfiguration(automatic_checkpoint_naming=__snake_case ,total_limit=2 )
# Train baseline
a__ = Accelerator(project_dir=__snake_case ,project_config=__snake_case )
a__ = accelerator.prepare(__snake_case )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(__snake_case ,'checkpoints' ,'checkpoint_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__snake_case ,'checkpoints' ,'checkpoint_9' ) ) )
self.assertTrue(os.path.exists(os.path.join(__snake_case ,'checkpoints' ,'checkpoint_10' ) ) )
    @require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    savedir = "/tmp/accelerate/state_checkpointing"
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''cpu''')
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
assert (
param_device.type == torch.device('''cpu''').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''on_device''')
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='''Unsupported optimizer map location passed'''):
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''invalid''')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_groupvit": [
"GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"GroupViTConfig",
"GroupViTOnnxConfig",
"GroupViTTextConfig",
"GroupViTVisionConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GroupViTModel",
"GroupViTPreTrainedModel",
"GroupViTTextModel",
"GroupViTVisionModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFGroupViTModel",
"TFGroupViTPreTrainedModel",
"TFGroupViTTextModel",
"TFGroupViTVisionModel",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
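# make_linear_from_emb ties an output projection to the embedding matrix: the
# returned nn.Linear shares the embedding's weight data and has no bias, the
# usual way an LM head is tied to input embeddings.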
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
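# Example (hypothetical fairseq key): with expert_idx=7,
#   "decoder.layers.3.moe_layer.experts.0.fc1.weight"
# is renamed to
#   "decoder.layers.3.ffn.experts.expert_7.fc1.weight"
# (the fc1/fc2 rewrites are skipped because the key contains "experts").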
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )
    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" DistilBERT tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Create token type IDs (segment IDs) from a sequence or a pair of sequences."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
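    # Example: for a sequence pair the mask is 0 over "[CLS] A [SEP]" and 1 over
    # "B [SEP]", e.g. token_type_ids == [0, 0, 0, 0, 1, 1] when len(A) == 2 and
    # len(B) == 1.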
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
def validate_initial_digits(credit_card_number: str) -> bool:
    """Returns True if the initial digits match one of the accepted issuer prefixes."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))
def luhn_validation(credit_card_number: str) -> bool:
    """Returns True if the given credit card number passes the Luhn checksum."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0
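# Worked example for luhn_validation("4111111111111111"): doubling every second
# digit from the right gives 8 (from the leading 4) plus seven 2s, and the
# eight untouched digits contribute 1 each, so the checksum is 8 + 14 + 8 = 30.
# Since 30 % 10 == 0, the number is valid.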
def validate_credit_card_number(credit_card_number: str) -> bool:
    """Print whether the given credit card number is valid and return the verdict."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
| 339
| 1
|
"""simple docstring"""
def binary_count_setbits(a: int) -> int:
    """
    Return the number of set bits (1s) in the binary representation of `a`.

    >>> binary_count_setbits(25)
    3
    """
    if a < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(a, float):
        raise TypeError("Input value must be a 'int' type")
    return bin(a).count("1")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 351
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : int = ["CLIPFeatureExtractor"]
A_ : Any = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
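# Note: with this lazy-import pattern, importing the package itself is cheap; a
# heavy submodule such as modeling_clip is only loaded the first time one of the
# names registered in _import_structure is accessed (handled by _LazyModule above).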
| 316
| 0
|
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    """Rename the embedding layer weights for stage `idx`."""
    embed = []
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
f"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
f"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
f"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
f"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def attention(idx, cnt):
    """Rename the attention block weights for block `cnt` of stage `idx`."""
    attention_weights = []
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", f"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", f"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", f"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", f"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def cls_token(idx):
    """Rename the cls_token weights."""
    token = []
    token.append((f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token"))
    return token
def final():
    """Rename the final classification head weights."""
    head = []
head.append(("layernorm.weight", "norm.weight") )
head.append(("layernorm.bias", "norm.bias") )
head.append(("classifier.weight", "head.weight") )
head.append(("classifier.bias", "head.bias") )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Convert a Microsoft CvT checkpoint to the Hugging Face format."""
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=3_8_4,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=R'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
help='Input Image Size',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
UpperCamelCase__ = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 65
|
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types])
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein["aatype"].device)
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein["aatype"].device)
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein["aatype"].device)
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
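# Minimal usage sketch (illustrative; the `protein` dict with an "aatype" tensor
# is assumed to come from the surrounding feature pipeline):
#
#   protein = {"aatype": torch.zeros(8, dtype=torch.long)}  # 8 residues
#   protein = make_atom14_masks(protein)
#   protein["atom14_atom_exists"].shape       # (8, 14) per-residue atom mask
#   protein["residx_atom14_to_atom37"].shape  # (8, 14) gather indices into atom37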
| 87
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizer(PreTrainedTokenizer):
    """
    CamemBERT tokenizer, based on SentencePiece (adapted from RobertaTokenizer and XLNetTokenizer).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs by adding the <s> ... </s> special tokens."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Create token type ids (all zeros: CamemBERT does not use token types)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Convert an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 192
|
import d4rl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
"""n_samples""": 64,
"""horizon""": 32,
"""num_inference_steps""": 20,
"""n_guide_steps""": 2, # can set to 0 for faster sampling, does not use value network
"""scale_grad_by_std""": True,
"""scale""": 0.1,
"""eta""": 0.0,
"""t_grad_cutoff""": 2,
"""device""": """cpu""",
}
if __name__ == "__main__":
_UpperCAmelCase = """hopper-medium-v2"""
_UpperCAmelCase = gym.make(env_name)
_UpperCAmelCase = ValueGuidedRLPipeline.from_pretrained(
"""bglick13/hopper-medium-v2-value-function-hor32""",
env=env,
)
env.seed(0)
_UpperCAmelCase = env.reset()
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = 1000
_UpperCAmelCase = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)
            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F"""Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"""
F""" {total_score}"""
)
# save observations for rendering
rollout.append(next_observation.copy())
            obs = next_observation
except KeyboardInterrupt:
pass
print(F"""Total reward: {total_reward}""")
| 192
| 1
|
'''simple docstring'''
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle row by row."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(triangle: list[list[int]], current_row: list[int], current_row_idx: int, current_col_idx: int) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle, exploiting the symmetry of each row."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result
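# Worked example of the symmetry trick above: to build row 3 from row 2,
# temp_row is [0, 1, 2, 1, 0]; only the 2 distinct leading sums [0 + 1, 1 + 2]
# = [1, 3] are computed, then mirrored to give [1, 3, 3, 1].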
def benchmark() -> None:
    """Benchmark both implementations for a range of row counts."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 63
|
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """
    Capitalize the first letter of `sentence`.

    >>> capitalize("hello world")
    'Hello world'
    """
    if not sentence:
        return ""
    # map each lowercase letter to its uppercase counterpart
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 272
| 0
|
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks if a matrix is Hermitian, i.e. equal to its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient of a Hermitian matrix `a` and vector `v`."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
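# For a Hermitian matrix `a`, the Rayleigh quotient R(a, v) = (v* a v) / (v* v)
# is always real and lies between the smallest and largest eigenvalues of `a`.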
def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 359
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
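# Illustrative usage sketch (assumes these classes resolve as in transformers):
#
#   config = BertConfig()                 # bert-base-uncased style defaults
#   onnx_config = BertOnnxConfig(config)
#   list(onnx_config.inputs)              # ['input_ids', 'attention_mask', 'token_type_ids']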
| 100
| 0
|
"""simple docstring"""
def perfect(number: int) -> bool:
    """Return True if `number` equals the sum of its proper divisors."""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
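# Worked example: the proper divisors of 28 are 1, 2, 4, 7 and 14, and
# 1 + 2 + 4 + 7 + 14 == 28, so perfect(28) returns True.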
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
    number = int(input('Enter number: ').strip())
print(F'''{number} is {"" if perfect(number) else "not "}a Perfect Number.''')
| 266
|
"""simple docstring"""
import re
def split_input(str_: str) -> list:
    """Split the input on any character that is not alphanumeric or whitespace."""
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    """Join the split words with their first letters capitalized (PascalCase style)."""
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split])


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    """Join the split words with `separator`, fully upper- or lower-cased."""
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ])
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ])
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")
if __name__ == "__main__":
__import__('doctest').testmod()
| 266
| 1
|
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel
api = HfApi()
results = {}
# fmt: off
UpperCAmelCase_ = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
UpperCAmelCase_ = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
UpperCAmelCase_ = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
UpperCAmelCase_ = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
UpperCAmelCase_ = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
UpperCAmelCase_ = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
UpperCAmelCase_ = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
UpperCAmelCase_ = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
UpperCAmelCase_ = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
UpperCAmelCase_ = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
UpperCAmelCase_ = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
UpperCAmelCase_ = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
UpperCAmelCase_ = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
UpperCAmelCase_ = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
UpperCAmelCase_ = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
print(f"""Started running {mod.modelId}!!!""")
if mod.modelId.startswith('CompVis'):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
else:
            model = UNet2DModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
            logits = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['_'.join('_'.join(mod.modelId.split('/')).split('-'))], atol=1E-3
)
print(f"""{mod.modelId} has passed successfully!!!""")
| 358
|
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 29
| 0
|