"""MobileBERT import structure, wired up for lazy module loading."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_mobilebert": [
        "MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileBertConfig",
        "MobileBertOnnxConfig",
    ],
    "tokenization_mobilebert": ["MobileBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilebert"] = [
        "MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileBertForMaskedLM",
        "MobileBertForMultipleChoice",
        "MobileBertForNextSentencePrediction",
        "MobileBertForPreTraining",
        "MobileBertForQuestionAnswering",
        "MobileBertForSequenceClassification",
        "MobileBertForTokenClassification",
        "MobileBertLayer",
        "MobileBertModel",
        "MobileBertPreTrainedModel",
        "load_tf_weights_in_mobilebert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
        "TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileBertForMaskedLM",
        "TFMobileBertForMultipleChoice",
        "TFMobileBertForNextSentencePrediction",
        "TFMobileBertForPreTraining",
        "TFMobileBertForQuestionAnswering",
        "TFMobileBertForSequenceClassification",
        "TFMobileBertForTokenClassification",
        "TFMobileBertMainLayer",
        "TFMobileBertModel",
        "TFMobileBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mobilebert import (
        MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileBertConfig,
        MobileBertOnnxConfig,
    )
    from .tokenization_mobilebert import MobileBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mobilebert_fast import MobileBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilebert import (
            MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
            MobileBertLayer,
            MobileBertModel,
            MobileBertPreTrainedModel,
            load_tf_weights_in_mobilebert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilebert import (
            TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileBertForMaskedLM,
            TFMobileBertForMultipleChoice,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertMainLayer,
            TFMobileBertModel,
            TFMobileBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
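# Usage sketch (illustrative, not part of the original file): with the lazy
# module in place, a statement such as
#   from transformers import MobileBertConfig
# only loads the configuration submodule the first time the attribute is
# actually accessed.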
from __future__ import annotations

import unittest

import numpy as np

from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask


if is_tf_available():
    import tensorflow as tf

    from transformers import TFBlipTextModel
    from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST


class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()

    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
"""simple docstring"""
import math
def __a ( _lowercase ):
"""simple docstring"""
lowerCamelCase__ : Tuple = []
lowerCamelCase__ : Optional[Any] = 2
lowerCamelCase__ : Dict = int(math.sqrt(_lowercase ) ) # Size of every segment
lowerCamelCase__ : int = [True] * (end + 1)
lowerCamelCase__ : Tuple = []
while start <= end:
if temp[start] is True:
in_prime.append(_lowercase )
for i in range(start * start , end + 1 , _lowercase ):
lowerCamelCase__ : Any = False
start += 1
prime += in_prime
lowerCamelCase__ : str = end + 1
lowerCamelCase__ : Union[str, Any] = min(2 * end , _lowercase )
while low <= n:
lowerCamelCase__ : Union[str, Any] = [True] * (high - low + 1)
for each in in_prime:
lowerCamelCase__ : Optional[Any] = math.floor(low / each ) * each
if t < low:
t += each
for j in range(_lowercase , high + 1 , _lowercase ):
lowerCamelCase__ : int = False
for j in range(len(_lowercase ) ):
if temp[j] is True:
prime.append(j + low )
lowerCamelCase__ : str = high + 1
lowerCamelCase__ : Any = min(high + end , _lowercase )
return prime
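# Added sanity check (not in the original script): the primes up to 30 are
# well known, so the segmented sieve can be verified against them directly.
assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]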
print(sieve(10**6))
"""simple docstring"""
def __a ( _lowercase ):
"""simple docstring"""
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
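# Added sanity checks (not in the original script): 6 and 28 are the two
# smallest perfect numbers, while 27 is not perfect.
assert perfect(6) and perfect(28)
assert not perfect(27)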
if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/blip-vqa-base": "https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json",
    "Salesforce/blip-vqa-capfit-large": (
        "https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"
    ),
    "Salesforce/blip-image-captioning-base": (
        "https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"
    ),
    "Salesforce/blip-image-captioning-large": (
        "https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"
    ),
    "Salesforce/blip-itm-base-coco": "https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json",
    "Salesforce/blip-itm-large-coco": "https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json",
    "Salesforce/blip-itm-base-flikr": "https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json",
    "Salesforce/blip-itm-large-flikr": (
        "https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"
    ),
}


class BlipTextConfig(PretrainedConfig):
    """Configuration for the BLIP text model."""

    model_type = "blip_text_model"

    def __init__(
        self,
        vocab_size=30524,
        hidden_size=768,
        encoder_hidden_size=768,
        intermediate_size=3072,
        projection_dim=768,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=512,
        hidden_act="gelu",
        layer_norm_eps=1e-12,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        bos_token_id=30522,
        eos_token_id=2,
        pad_token_id=0,
        sep_token_id=102,
        is_decoder=True,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BlipVisionConfig(PretrainedConfig):
    """Configuration for the BLIP vision model."""

    model_type = "blip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        image_size=384,
        patch_size=16,
        hidden_act="gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=1e-10,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BlipConfig(PretrainedConfig):
    """Composite configuration holding a text and a vision sub-config."""

    model_type = "blip"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        image_text_hidden_size=256,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.")

        self.text_config = BlipTextConfig(**text_config)
        self.vision_config = BlipVisionConfig(**vision_config)

        self.text_config.encoder_hidden_size = self.vision_config.hidden_size

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size

    @classmethod
    def from_text_vision_configs(cls, text_config: BlipTextConfig, vision_config: BlipVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
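# Usage sketch (illustrative, not part of the original module): a composite
# BLIP config can be assembled from explicit sub-configs, e.g.
#   config = BlipConfig.from_text_vision_configs(BlipTextConfig(), BlipVisionConfig())
# which, via __init__ above, wires text_config.encoder_hidden_size to the
# vision model's hidden size.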
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int = 2_0_0_0_0_0_0 ):
"""simple docstring"""
snake_case_ : Optional[Any] = [0 for i in range(n + 1 )]
snake_case_ : int = 1
snake_case_ : str = 1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , SCREAMING_SNAKE_CASE__ ):
snake_case_ : Optional[int] = 1
snake_case_ : Any = 0
for i in range(SCREAMING_SNAKE_CASE__ ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
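# Added sanity check (not in the original script): the primes below 10 are
# 2, 3, 5 and 7, which sum to 17.
assert solution(10) == 17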
if __name__ == "__main__":
    print(f"{solution() = }")
from typing import Optional
from urllib.parse import quote

import huggingface_hub as hfh
from packaging import version


def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
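# Usage sketch (illustrative; the repo id and file path are made up):
#   hf_hub_url("user/my_dataset", "data/train.csv", revision="main")
# returns the resolved download URL for that file inside the dataset repo.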
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"
}


class OpenAIGPTConfig(PretrainedConfig):
    """Configuration for the original OpenAI GPT model."""

    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
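# Usage sketch (illustrative, not part of the original module): the attribute
# map lets canonical config names resolve to the GPT-specific ones, e.g.
#   config = OpenAIGPTConfig(n_layer=6)
#   config.num_hidden_layers  # -> 6, redirected through attribute_map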
from typing import Any, Dict, List, Optional, Tuple, Union

import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler

from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
    Adafactor,
    AdamW,
    get_constant_schedule,
    get_constant_schedule_with_warmup,
    get_cosine_schedule_with_warmup,
    get_cosine_with_hard_restarts_schedule_with_warmup,
    get_linear_schedule_with_warmup,
    get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available


if is_fairscale_available():
    from fairscale.optim import OSS


logger = logging.get_logger(__name__)

arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}


class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
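# Usage sketch (illustrative; the model, args and dataset objects are assumed
# to come from the surrounding example scripts):
#   trainer = Seq2SeqTrainer(config=model.config, data_args=data_args,
#                            model=model, args=training_args,
#                            train_dataset=train_dataset)
# With --label_smoothing > 0, __init__ above swaps CrossEntropyLoss for
# label_smoothed_nll_loss.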
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """Check, via collections.Counter, whether some permutation of the string is a palindrome."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """Check whether some permutation of the string is a palindrome: at most one
    character may appear an odd number of times."""
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}

    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1

    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    """Benchmark the two implementations against each other."""
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
"""Pix2Struct import structure, wired up for lazy module loading."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Constraint classes used for constrained beam-search generation."""
from abc import ABC, abstractmethod
from typing import List, Optional


class Constraint(ABC):
    r"""Abstract base class for all constraints that can be applied during generation."""

    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        """Tests whether this constraint has been properly defined."""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )

            stepped, completed, reset = self.update(advance)
            counter += 1

            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")

        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        """Return the token id(s) that would advance this constraint by one step."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        """Return whether `token_id` would make incremental progress on this constraint."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        """Consume `token_id` and return the triple `(stepped, completed, reset)`."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        """Discard any progress made on this constraint."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        """Return the number of steps still needed to fulfill this constraint."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        """Return a copy of this constraint, optionally carrying over its progress."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class PhrasalConstraint(Constraint):
    r"""A [`Constraint`] enforcing that an ordered sequence of tokens appears in the output."""

    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()

        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")

        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        if self.completed:
            return False

        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)

        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed

        return new_constraint


class DisjunctiveTrie:
    r"""A simple trie over `nested_token_ids`, used to track which continuations remain possible."""

    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]

        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )

        self.trie = root

    def next_tokens(self, current_seq):
        """The next possible tokens that will progress the trie, given the tokens in `current_seq`."""
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        """Returns whether the number of leaves differs from the number of words, i.e. some word is a subset of another."""
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count


class DisjunctiveConstraint(Constraint):
    r"""A [`Constraint`] fulfilled once any one of several candidate token sequences appears."""

    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids

        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed

        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)

        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed

        return new_constraint


class ConstraintListState:
    r"""A class for beam scorers to track their progress through a list of constraints."""

    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints

        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False

        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()

        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids: Optional[List[int]]):
        self.init_state()

        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)

                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")

        complete, stepped = False, False

        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state.
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None

            if complete:
                # 2. If the next token completes the constraint, move it to the completed list and set
                #    inprogress to None. If there are no pending constraints either, then this full list of
                #    constraints is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None

                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True

        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of
            # our list of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)

                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )

                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None

                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint

                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )

                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True

                        break  # prevent accidentally stepping through multiple constraints with just one token.

        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we never touch self.constraints objects throughout
        # this process, so the fresh copy starts at initialization state.

        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]

        return new_state
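# Usage sketch (illustrative, not part of the original module): a
# PhrasalConstraint over token ids [5, 6, 7] is stepped one id at a time.
#   constraint = PhrasalConstraint([5, 6, 7])
#   stepped, completed, reset = constraint.update(5)  # True, False, False
#   stepped, completed, reset = constraint.update(9)  # False, False, True (progress resets)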
"""Sieve of Eratosthenes."""


def prime_sieve_eratosthenes(num: int) -> list[int]:
    """
    Return the list of all primes up to num using the sieve of Eratosthenes.

    >>> prime_sieve_eratosthenes(10)
    [2, 3, 5, 7]
    """
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
def trapezoidal_rule(boundary, steps):
    """Approximate the integral of f over [boundary[0], boundary[1]] with the
    extended trapezoidal rule: int(f) = h/2 * (f1 + 2*f2 + ... + fn)."""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    """Yield the interior sample points between a and b with spacing h."""
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y
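# Added sanity check (not in the original script): for f(x) = x**2 on [0, 1]
# the exact integral is 1/3, and with 10 steps the trapezoidal estimate
# comes out close to 0.335.
assert abs(trapezoidal_rule([0.0, 1.0], 10.0) - 1 / 3) < 1e-2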
def main():
    a = 0.0  # lower bound of integration
    b = 1.0  # upper bound of integration
    steps = 10.0  # number of steps, i.e. the resolution
    boundary = [a, b]  # boundary of integration
    y = trapezoidal_rule(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
"""simple docstring"""
def snake_case ( UpperCamelCase__ : list , UpperCamelCase__ : int , UpperCamelCase__ : int = 0 , UpperCamelCase__ : int = 0 ) -> Union[str, Any]:
lowerCamelCase : int = right or len(A__ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(A__ , A__ , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
"""simple docstring"""
def __init__( self: Optional[Any] , __a: Union[str, Any] , __a: Optional[Any]=13 , __a: Optional[Any]=32 , __a: Dict=3 , __a: int=4 , __a: Dict=[10, 20, 30, 40] , __a: int=[2, 2, 3, 2] , __a: Any=True , __a: List[Any]=True , __a: Any=37 , __a: Optional[int]="gelu" , __a: List[str]=10 , __a: Optional[int]=0.02 , __a: Dict=["stage2", "stage3", "stage4"] , __a: List[str]=[2, 3, 4] , __a: List[str]=None , )-> Union[str, Any]:
lowerCamelCase : Optional[int] = parent
lowerCamelCase : Optional[int] = batch_size
lowerCamelCase : Any = image_size
lowerCamelCase : Tuple = num_channels
lowerCamelCase : str = num_stages
lowerCamelCase : List[str] = hidden_sizes
lowerCamelCase : str = depths
lowerCamelCase : Dict = is_training
lowerCamelCase : Optional[Any] = use_labels
lowerCamelCase : List[str] = intermediate_size
lowerCamelCase : List[str] = hidden_act
lowerCamelCase : List[str] = num_labels
lowerCamelCase : Union[str, Any] = initializer_range
lowerCamelCase : List[Any] = out_features
lowerCamelCase : Optional[Any] = out_indices
lowerCamelCase : int = scope
def a__ ( self: str )-> Optional[Any]:
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : Dict = None
if self.use_labels:
lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase : Any = self.get_config()
return config, pixel_values, labels
def a__ ( self: Dict )-> Union[str, Any]:
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def a__ ( self: Optional[Any] , __a: List[Any] , __a: Any , __a: int )-> List[Any]:
lowerCamelCase : Optional[int] = ConvNextModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__ ( self: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> Optional[int]:
lowerCamelCase : str = ConvNextForImageClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self: List[Any] , __a: Any , __a: Optional[int] , __a: Tuple )-> List[str]:
lowerCamelCase : List[str] = ConvNextBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(__a )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCamelCase : Tuple = None
lowerCamelCase : List[str] = ConvNextBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[Any] = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self: Optional[Any] )-> Any:
lowerCamelCase : List[Any] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = config_and_inputs
lowerCamelCase : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : int =(
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
snake_case__ : str =(
{'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification}
if is_torch_available()
else {}
)
snake_case__ : Union[str, Any] =True
snake_case__ : Optional[int] =False
snake_case__ : Tuple =False
snake_case__ : Union[str, Any] =False
snake_case__ : Tuple =False
def a__ ( self: Optional[Any] )-> Union[str, Any]:
lowerCamelCase : Tuple = ConvNextModelTester(self )
lowerCamelCase : List[Any] = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def a__ ( self: Optional[int] )-> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self: Optional[int] )-> Optional[Any]:
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def a__ ( self: int )-> Dict:
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def a__ ( self: Dict )-> Optional[Any]:
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def a__ ( self: int )-> List[Any]:
pass
def a__ ( self: Union[str, Any] )-> int:
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Any = model_class(__a )
lowerCamelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Optional[Any] = [*signature.parameters.keys()]
lowerCamelCase : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def a__ ( self: Optional[int] )-> str:
lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: str )-> int:
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__a )
def a__ ( self: int )-> Optional[int]:
def check_hidden_states_output(__a: Tuple , __a: int , __a: Tuple ):
lowerCamelCase : str = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
lowerCamelCase : Tuple = model(**self._prepare_for_class(__a , __a ) )
lowerCamelCase : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(__a ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase , lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : List[Any] = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : Tuple = True
check_hidden_states_output(__a , __a , __a )
def a__ ( self: Dict )-> Optional[Any]:
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def a__ ( self: Optional[Any] )-> Tuple:
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : str = ConvNextModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def snake_case ( ) -> Optional[int]:
lowerCamelCase : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase):
"""simple docstring"""
@cached_property
def a__ ( self: Dict )-> Union[str, Any]:
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def a__ ( self: List[str] )-> Dict:
lowerCamelCase : Tuple = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(__a )
lowerCamelCase : Dict = self.default_image_processor
lowerCamelCase : Union[str, Any] = prepare_img()
lowerCamelCase : Optional[Any] = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
lowerCamelCase : Any = model(**__a )
# verify the logits
lowerCamelCase : Union[str, Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __a )
lowerCamelCase : Tuple = torch.tensor([-0.02_60, -0.47_39, 0.19_11] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
@require_torch
class A__ ( unittest.TestCase , __lowercase):
"""simple docstring"""
snake_case__ : Union[str, Any] =(ConvNextBackbone,) if is_torch_available() else ()
snake_case__ : Optional[Any] =ConvNextConfig
snake_case__ : Optional[Any] =False
def a__ ( self: List[str] )-> int:
lowerCamelCase : Dict = ConvNextModelTester(self )
"""simple docstring"""
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
__snake_case = logging.getLogger(__name__)
class _SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
"""simple docstring"""
_a : List[str] = '''sequence-classification'''
def __init__( self , lowerCamelCase__ ) -> List[Any]:
if type(lowerCamelCase__ ) == dict:
lowercase__ : Dict = Namespace(**lowerCamelCase__ )
lowercase__ : Any = glue_output_modes[hparams.task]
lowercase__ : Union[str, Any] = glue_tasks_num_labels[hparams.task]
super().__init__(lowerCamelCase__ , lowerCamelCase__ , self.mode )
def UpperCAmelCase__( self , **lowerCamelCase__ ) -> int:
return self.model(**lowerCamelCase__ )
    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
    def prepare_data(self):
        """Called to initialize data. Builds and caches the features if needed."""
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples, self.tokenizer, max_length=args.max_seq_length, label_list=self.labels, output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        # We evaluate on the dev set in place of "test" to compare to benchmarks without submitting to the GLUE server.
        mode = "dev" if mode == "test" else mode
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )
    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--task", default="", type=str, required=True, help="The GLUE task to run")
        parser.add_argument(
            "--gpus", default=0, type=int, help="The number of GPUs allocated for this, it is by default 0 meaning none"
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
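# Entry point: build the argument parser, train with Lightning, then optionally test the last checkpoint.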
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
| 200
|
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the block of text in `filename` between lines beginning with `start_prompt` and `end_prompt`."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
__snake_case = 'Model|Encoder|Decoder|ForConditionalGeneration'
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a camel-cased `identifier` into its words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def _center_text(text, width):
    """Center `text` in a table cell of the given `width` (the check/cross emoji count for two characters)."""
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """Return the model table of the doc `index.md`, built from the auto module mappings."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers objects (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-align table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    """Check the model table in `index.md` is up to date; rewrite it in place when `overwrite` is True."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
__snake_case = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 200
| 1
|
'''simple docstring'''
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
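# Builds a tiny BigBird config plus random inputs shared by the Flax tests below.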
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=2, seq_length=5_6, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=9_9, hidden_size=3_2, num_hidden_layers=2, num_attention_heads=2, intermediate_size=7, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_1_2, type_vocab_size=1_6, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, attention_type="block_sparse", use_bias=True, rescale_embeddings=False, block_size=2, num_random_blocks=3):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, attention_type=self.attention_type, block_size=self.block_size, num_random_blocks=self.num_random_blocks, use_bias=self.use_bias, rescale_embeddings=self.rescale_embeddings)
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was made to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
| 701
|
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
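# Donut-style processor: pairs an image processor with a tokenizer, and can decode generated
# `<s_...>...</s_...>` token sequences back into JSON.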
class DonutProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a (generated) token sequence into an ordered JSON format."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 427
| 0
|
'''simple docstring'''
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        """Test `_convert_token_to_id` and `_convert_id_to_token`."""
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."]
        )
    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
@slow
def _lowercase ( self ) -> str:
# fmt: off
_UpperCamelCase : str = {'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''input_ids''': [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case , model_name='''albert-base-v2''' , revision='''6b6560eaf5ff2e250b00c50f380c5389a9c2d82e''' , )
| 683
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
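# Conversion script: port a timm BiT (ResNet-v2) checkpoint into the Hugging Face `Bit*` classes.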
def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1_000,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the timm model's weights into our BiT structure."""
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 683
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__snake_case: int = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    """Normalize a single video or a list of videos into a batched list-of-lists of frames."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class _UpperCAmelCase(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 2_55, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 2_24}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(self, image: ImageInput, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, data_format=ChannelDimension.FIRST) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean=None, image_std=None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 460
|
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
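# These tests exercise `patch_submodule`, which temporarily replaces an attribute reachable
# from a module (e.g. `os.path.join`) and restores the original on exit.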
def test_patch_submodule():
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock

    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open
def test_patch_submodule_missing():
    # `pandas.read_csv` is not present in _test_patching; patching it should be a no-op
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass
def test_patch_submodule_missing_builtin():
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
def test_patch_submodule_successive():
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
| 460
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
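# Map each submodule to its public names so `_LazyModule` can defer the actual imports.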
_import_structure = {
'''configuration_blenderbot_small''': [
'''BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotSmallConfig''',
'''BlenderbotSmallOnnxConfig''',
],
'''tokenization_blenderbot_small''': ['''BlenderbotSmallTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
'''BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotSmallForCausalLM''',
'''BlenderbotSmallForConditionalGeneration''',
'''BlenderbotSmallModel''',
'''BlenderbotSmallPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
'''TFBlenderbotSmallForConditionalGeneration''',
'''TFBlenderbotSmallModel''',
'''TFBlenderbotSmallPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
'''FlaxBlenderbotSmallForConditionalGeneration''',
'''FlaxBlenderbotSmallModel''',
'''FlaxBlenderbotSmallPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 103
|
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
__snake_case = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    """Training arguments with the extra knobs used by the legacy seq2seq examples."""

    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSamler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
| 189
| 0
|
from functools import reduce
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in the 1000-digit number `n`."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 721
|
def gnome_sort ( lst : list ) -> list:
    '''Sort a list in place with the gnome sort algorithm and return it.'''
    if len(lst ) <= 1:
        return lst
    i = 1
    while i < len(lst ):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1] , lst[i] = lst[i] , lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
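# Illustrative sanity checks (not part of the original file). Gnome sort is O(n^2) in the
# worst case but adaptive: nearly-sorted input triggers few swaps.
assert gnome_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
assert gnome_sort([]) == []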
| 639
| 0
|
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    """`FeatureType` for translations with a fixed set of languages per example."""

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    """`FeatureType` for translations with a variable set of languages per example."""

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})." )
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])
        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))
        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
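# A minimal usage sketch (illustrative, not part of the original module): encoding one
# example with a variable number of translations per language.
if __name__ == "__main__":
    feature = TranslationVariableLanguages(languages=["en", "fr"])
    print(feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]}))
    # -> {'language': ('en', 'fr', 'fr'), 'translation': ('the cat', 'la chatte', 'le chat')}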
| 21
|
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class PhobertTokenizer(PreTrainedTokenizer):
    """Construct a PhoBERT tokenizer, using Byte-Pair-Encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self, vocab_file, merges_file, bos_token="<s>", eos_token="</s>", sep_token="</s>",
        cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs,
    ):
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs,
        )
        self.vocab_file = vocab_file
        self.merges_file = merges_file
        # The special tokens are not part of vocab.txt, so they are added here explicitly.
        self.encoder = {}
        self.encoder[bos_token] = 0
        self.encoder[pad_token] = 1
        self.encoder[eos_token] = 2
        self.encoder[unk_token] = 3
        self.add_from_file(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)
        return out_vocab_file, out_merge_file

    def add_from_file(self, f):
        """Load a pre-existing dictionary from a text file and add its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f'Incorrect encoding detected in {f}, please rebuild the dataset')
            return
        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
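# A minimal usage sketch (illustrative, not part of the original file): PhoBERT expects
# word-segmented Vietnamese input, with multi-syllable words joined by underscores.
# from transformers import PhobertTokenizer
# tokenizer = PhobertTokenizer.from_pretrained("vinai/phobert-base")
# tokenizer.tokenize("Tôi là sinh_viên")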
| 604
| 0
|
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True,
        use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False,
        causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=12,
        type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4,
        summary_type="last", use_proj=None, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm, causal=self.causal, n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range, summary_type=self.summary_type,
            use_proj=self.use_proj,
        )

    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels,
            cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels,
            cls_index=sequence_labels, is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlaubertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
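# To run this suite locally (illustrative invocation):
# python -m pytest tests/models/flaubert/test_modeling_flaubert.py -k "flaubert"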
| 703
|
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
_DESCRIPTION = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only works with the CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn	Type	Description\n1	Document ID	This is a variation on the document filename\n2	Part number	Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3	Word number\n4	Word itself	This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5	Part-of-Speech\n6	Parse bit	This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7	Predicate lemma	The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8	Predicate Frameset ID	This is the PropBank frameset ID of the predicate in Column 7.\n9	Word sense	This is the word sense of the word in Column 3.\n10	Speaker/Author	This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11	Named Entities	These columns identify the spans representing various named entities.\n12:N	Predicate Arguments	There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN	Coreference	Coreference chain information encoded in a parenthesis structure.\nMore information on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
_KWARGS_DESCRIPTION = '\nCalculates coreference evaluation metrics.\nArgs:\n    predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n        Each prediction is a word with its annotations as a string made of columns joined with spaces.\n        Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n        See the details on the format in the description of the metric.\n    references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n        Each reference is a word with its annotations as a string made of columns joined with spaces.\n        Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n        See the details on the format in the description of the metric.\n    keep_singletons: After extracting all mentions of key or system files,\n        mentions whose corresponding coreference chain is of size one,\n        are considered as singletons. The default evaluation mode will include\n        singletons in evaluations if they are included in the key or the system files.\n        By setting \'keep_singletons=False\', all singletons in the key and system files\n        will be excluded from the evaluation.\n    NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n        leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n    min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n        Minimum spans are determined using the MINA algorithm.\n\nReturns:\n    \'mentions\': mentions\n    \'muc\': MUC metric [Vilain et al, 1995]\n    \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n    \'ceafe\': CEAFe [Luo et al., 2005]\n    \'lea\': LEA [Moosavi and Strube, 2016]\n    \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n    >>> coval = datasets.load_metric(\'coval\')\n    >>> words = [\'bc/cctv/00/cctv_0005   0   0       Thank   VBP  (TOP(S(VP*    thank  01   1    Xu_li  *           (V*)        *       -\',\n    ... \'bc/cctv/00/cctv_0005   0   1         you   PRP        (NP*)      -    -   -    Xu_li  *        (ARG1*)   (ARG0*)   (116)\',\n    ... \'bc/cctv/00/cctv_0005   0   2    everyone    NN        (NP*)      -    -   -    Xu_li  *    (ARGM-DIS*)        *   (116)\',\n    ... \'bc/cctv/00/cctv_0005   0   3         for    IN        (PP*       -    -   -    Xu_li  *        (ARG2*         *       -\',\n    ... \'bc/cctv/00/cctv_0005   0   4    watching   VBG   (S(VP*))))   watch  01   1    Xu_li  *             *)      (V*)       -\',\n    ... \'bc/cctv/00/cctv_0005   0   5           .     .          *))      -    -   -    Xu_li  *             *         *       -\']\n    >>> references = [words]\n    >>> predictions = [words]\n    >>> results = coval.compute(predictions=predictions, references=references)\n    >>> print(results) # doctest:+ELLIPSIS\n    {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    """Gather per-document coreference information for the requested evaluation settings."""
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)
    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)
    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            'Number of removed nested coreferring mentions in the key '
            f"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" )
        logger.info(
            'Number of resulting singleton clusters in the key '
            f"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" )
    if not keep_singletons:
        logger.info(
            f"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """
            'files, respectively' )
    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    """Run each requested metric over the documents and collect recall/precision/F1 scores."""
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"""{name}/recall""": recall, f"""{name}/precision""": precision, f"""{name}/f1""": f1} )
        logger.info(
            name.ljust(10 ) , f"""Recall: {recall * 1_00:.2f}""" , f""" Precision: {precision * 1_00:.2f}""" , f""" F1: {f1 * 1_00:.2f}""" , )
    if conll_subparts_num == 3:
        conll = (conll / 3) * 1_00
        logger.info(f"""CoNLL score: {conll:.2f}""" )
        output_scores.update({'conll_score': conll} )
    return output_scores
def check_gold_parse_annotation(key_lines):
    """Return True if the key file carries gold parse information in column 6."""
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith('#' ):
            if len(line.split() ) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('string' ) ),
                    'references': datasets.Sequence(datasets.Value('string' ) ),
                } ) , codebase_urls=['https://github.com/ns-moosavi/coval'] , reference_urls=[
                'https://github.com/ns-moosavi/coval',
                'https://www.aclweb.org/anthology/P16-1060',
                'http://www.conll.cemantix.org/2012/data.html',
            ] , )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        allmetrics = [
            ('mentions', evaluator.mentions),
            ('muc', evaluator.muc),
            ('bcub', evaluator.b_cubed),
            ('ceafe', evaluator.ceafe),
            ('lea', evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.' )
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references , sys_lines=predictions , metrics=allmetrics , NP_only=NP_only , remove_nested=remove_nested , keep_singletons=keep_singletons , min_span=min_span , )
        return score
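# A minimal usage sketch (mirrors the docstring example above; `words` is a list of
# CoNLL-format lines for one sentence):
# coval = datasets.load_metric("coval")
# results = coval.compute(predictions=[words], references=[words])
# results["conll_score"]  # -> 100.0 when key and system files are identical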
| 443
| 0
|
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    """Count the words in words.txt whose alphabetical value is a triangular number."""
    script_directory = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_directory, '''words.txt''')
    words = ''''''
    with open(words_file_path) as f:
        words = f.readline()
    words = [word.strip('''"''') for word in words.strip('''\r\n''').split(''',''')]
    words = [
        word
        for word in [sum(ord(x) - 6_4 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
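# Worked example from the Project Euler #42 statement (not part of the original file):
# the word value of "SKY" is 19 + 11 + 25 = 55, the 10th triangular number.
assert sum(ord(x) - 64 for x in "SKY") == 55
assert 55 in TRIANGULAR_NUMBERS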
| 521
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    '''
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    '''
    model_name_or_path: str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
    model_revision: str = field(
        default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
    use_auth_token: bool = field(
        default=False , metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        } , )
@dataclass
class DataTrainingArguments:
    '''
    Arguments pertaining to what data we are going to input our model for training and eval.
    '''
    train_file: Optional[str] = field(default=None , metadata={'''help''': '''The input training data file (a text file).'''} )
    validation_file: Optional[str] = field(
        default=None , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
    overwrite_cache: bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
    max_seq_length: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. If passed, sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    pad_to_max_length: bool = field(
        default=False , metadata={
            '''help''': (
                '''Whether to pad all samples to the maximum sentence length. '''
                '''If False, will pad the samples dynamically when batching to the maximum length in the batch. More '''
                '''efficient on GPU but very bad for TPU.'''
            )
        } , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        } , )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split('''.''' )[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split('''.''' )[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    '''
    Data collator that dynamically pads the inputs for multiple choice.
    '''
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = '''label''' if '''label''' in features[0].keys() else '''labels'''
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]['''input_ids'''] )
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features) )
        batch = self.tokenizer.pad(
            flattened_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
        # Un-flatten
        batch = {k: v.view(batch_size , num_choices , -1 ) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels , dtype=torch.int64 )
        return batch
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('''run_swag''' , model_args , data_args )

    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
        + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}""" )
    logger.info(f"""Training/evaluation parameters {training_args}""" )

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                '''Use --overwrite_output_dir to overcome.''' )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )

    # Set seed before initializing model.
    set_seed(training_args.seed )

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split('''.''' )[-1]
        raw_datasets = load_dataset(
            extension , data_files=data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            '''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer

    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"""ending{i}""" for i in range(4 )]
    context_name = '''sent1'''
    question_header_name = '''sent2'''

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                '''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'''
                ''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'''
                ''' override this default with `--block_size xxx`.''' )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
                f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
        max_seq_length = min(data_args.max_seq_length , tokenizer.model_max_length )

    # Preprocessing the datasets.
    def preprocess_function(examples ):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(question_headers )
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences ) )
        second_sentences = list(chain(*second_sentences ) )
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences , second_sentences , truncation=True , max_length=max_seq_length , padding='''max_length''' if data_args.pad_to_max_length else False , )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0 , len(v ) , 4 )] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError('''--do_train requires a train dataset''' )
        train_dataset = raw_datasets['''train''']
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset ) , data_args.max_train_samples )
            train_dataset = train_dataset.select(range(max_train_samples ) )
        with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
            train_dataset = train_dataset.map(
                preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError('''--do_eval requires a validation dataset''' )
        eval_dataset = raw_datasets['''validation''']
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset ) , data_args.max_eval_samples )
            eval_dataset = eval_dataset.select(range(max_eval_samples ) )
        with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
            eval_dataset = eval_dataset.map(
                preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer , pad_to_multiple_of=8 if training_args.fp16 else None )
    )

    # Metric
    def compute_metrics(eval_predictions ):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions , axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics , )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics["train_samples"] = min(max_train_samples , len(train_dataset ) )
        trainer.log_metrics('''train''' , metrics )
        trainer.save_metrics('''train''' , metrics )
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics["eval_samples"] = min(max_eval_samples , len(eval_dataset ) )
        trainer.log_metrics('''eval''' , metrics )
        trainer.save_metrics('''eval''' , metrics )

    kwargs = {
        '''finetuned_from''': model_args.model_name_or_path,
        '''tasks''': '''multiple-choice''',
        '''dataset_tags''': '''swag''',
        '''dataset_args''': '''regular''',
        '''dataset''': '''SWAG''',
        '''language''': '''en''',
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )


def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
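# Typical invocation (illustrative; paths and hyperparameters are placeholders):
# python run_swag.py \
#   --model_name_or_path bert-base-uncased \
#   --do_train --do_eval \
#   --per_device_train_batch_size 16 \
#   --learning_rate 5e-5 --num_train_epochs 3 \
#   --output_dir /tmp/swag_output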
| 390
| 0
|
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class _lowerCamelCase :
"""simple docstring"""
    def __init__( self , parent , ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs( self ):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = TransfoXLConfig(
            vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed( self ):
        random.seed(self.seed )
        tf.random.set_seed(self.seed )

    def create_and_check_transfo_xl_model( self , config , input_ids_1 , input_ids_2 , lm_labels ):
        model = TFTransfoXLModel(config )
        hidden_states_1, mems_1 = model(input_ids_1 ).to_tuple()
        inputs = {'''input_ids''': input_ids_2, '''mems''': mems_1}
        hidden_states_2, mems_2 = model(inputs ).to_tuple()
        self.parent.assertEqual(hidden_states_1.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(hidden_states_2.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )

    def create_and_check_transfo_xl_lm_head( self , config , input_ids_1 , input_ids_2 , lm_labels ):
        model = TFTransfoXLLMHeadModel(config )
        lm_logits_1, mems_1 = model(input_ids_1 ).to_tuple()
        inputs = {'''input_ids''': input_ids_1, '''labels''': lm_labels}
        _, mems_1 = model(inputs ).to_tuple()
        lm_logits_2, mems_2 = model([input_ids_2, mems_1] ).to_tuple()
        inputs = {'''input_ids''': input_ids_2, '''mems''': mems_1, '''labels''': lm_labels}
        _, mems_2 = model(inputs ).to_tuple()
        self.parent.assertEqual(lm_logits_1.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
        self.parent.assertEqual(lm_logits_2.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )

    def create_and_check_transfo_xl_for_sequence_classification( self , config , input_ids_1 , input_ids_2 , lm_labels ):
        model = TFTransfoXLForSequenceClassification(config )
        result = model(input_ids_1 )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""

    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False
snake_case = False
    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True
        return False

    def setUp( self ):
        self.model_tester = TFTransfoXLModelTester(self )
        self.config_tester = ConfigTester(self , config_class=TransfoXLConfig , d_embed=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model( self ):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs )

    def test_transfo_xl_lm_head( self ):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs )

    def test_transfo_xl_sequence_classification_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs )

    def test_model_common_attributes( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            model = model_class(config )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x , tf.keras.layers.Layer )
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode( self ):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained( self ):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name )
            self.assertIsNotNone(model )

    @unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' )
    def test_dataset_conversion( self ):
        pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest( unittest.TestCase ):
"""simple docstring"""
@unittest.skip('''Skip test until #12651 is resolved.''' )
@slow
    def test_lm_generate_transfo_xl_wt103( self ):
        model = TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' )
        # fmt: off
        input_ids = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0]] , dtype=tf.int32 )  # noqa: E231
        # fmt: on
        # In 1991 , the remains of Russian Tsar Nicholas II and his family
        # ( except for Alexei and Maria ) are discovered .
        # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
        # remainder of the story . 1883 Western Siberia ,
        # a young Grigori Rasputin is asked by his father and a group of men to perform magic .
        # Rasputin has a vision and denounces one of the men as a horse thief . Although his
        # father initially slaps him for making such an accusation , Rasputin watches as the
        # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
        # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
        # with people , even a bishop , begging for his blessing . <eod> </s> <eos>
        # fmt: off
        expected_output_ids = [33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0,33,1,1857,2,1,1009,4,1109,1_1739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,0]  # noqa: E231
        # fmt: on
        # In 1991, the remains of Russian Tsar Nicholas II and his family (
        # except for Alexei and Maria ) are discovered. The voice of young son,
        # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
        # 1883 Western Siberia, a young Grigori Rasputin is asked by his father
        # and a group of men to perform magic. Rasputin has a vision and
        # denounces one of the men as a horse thief. Although his father initially
        # slaps him for making such an accusation, Rasputin watches as the man
        # is chased outside and beaten. Twenty years later, Rasputin sees a vision
        # of the Virgin Mary, prompting him to become a priest.
        # Rasputin quickly becomes famous, with people, even a bishop, begging for
        # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
        # Nicholas II and his family were discovered. The voice of <unk> young son,
        # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids , max_length=200 , do_sample=False )
        self.assertListEqual(output_ids[0].numpy().tolist() , expected_output_ids )
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class EncodecFeatureExtractor(SequenceFeatureExtractor ):
    """simple docstring"""

    model_input_names = ["input_values", "padding_mask"]
    def __init__( self , feature_size = 1 , sampling_rate = 2_4000 , padding_value = 0.0 , chunk_length_s = None , overlap = None , **kwargs , ):
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
@property
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
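    # Worked example (illustrative numbers, not from this file): with
    # chunk_length_s=1.0, sampling_rate=24000 and overlap=0.01, chunk_length is
    # int(1.0 * 24000) == 24000 samples and chunk_stride is
    # max(1, int((1.0 - 0.01) * 24000)) == 23760 samples.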
    def __call__( self , raw_audio , padding = None , truncation = False , max_length = None , return_tensors = None , sampling_rate = None , ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
                    F''' {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )
        if padding and truncation:
            raise ValueError('''Both padding and truncation were set. Make sure you only set one.''' )
        elif padding is None:
            # by default let's pad the inputs
            padding = True
        is_batched = bool(
            isinstance(raw_audio , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
        if is_batched:
            raw_audio = [np.asarray(audio , dtype=np.float32 ).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio , np.ndarray ):
            raw_audio = np.asarray(raw_audio , dtype=np.float32 )
        elif isinstance(raw_audio , np.ndarray ) and raw_audio.dtype is np.dtype(np.float64 ):
            raw_audio = raw_audio.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio ).T]
        # verify inputs are valid
        for idx, example in enumerate(raw_audio ):
            if example.ndim > 2:
                raise ValueError(F'''Expected input shape (channels, length) but got shape {example.shape}''' )
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(F'''Expected mono audio but example has {example.shape[-1]} channels''' )
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(F'''Expected stereo audio but example has {example.shape[-1]} channels''' )
        padded_inputs = None
        input_values = BatchFeature({'''input_values''': raw_audio} )
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio )
                nb_step = int(np.floor(max_length / self.chunk_stride ) )
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio )
                nb_step = int(np.ceil(max_length / self.chunk_stride ) )
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = '''max_length'''
            else:
                padded_inputs = input_values
        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values , max_length=max_length , truncation=truncation , padding=padding , return_attention_mask=padding , )
            if padding:
                padded_inputs['''padding_mask'''] = padded_inputs.pop('''attention_mask''' )
        input_values = []
        for example in padded_inputs.pop('''input_values''' ):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T )
        padded_inputs['''input_values'''] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
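# Minimal usage sketch (values are illustrative):
#   fe = EncodecFeatureExtractor(feature_size=1, sampling_rate=24000)
#   batch = fe([np.zeros(24000, dtype=np.float32)], sampling_rate=24000, return_tensors="np")
#   batch["input_values"].shape  # -> (1, 1, 24000): (batch, channels, samples)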
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)


class RagTokenizer:
    def __init__( self , question_encoder , generator ):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained( self , save_directory ):
        if os.path.isfile(save_directory ):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file" )
        os.makedirs(save_directory , exist_ok=True )
        question_encoder_path = os.path.join(save_directory , '''question_encoder_tokenizer''' )
        generator_path = os.path.join(save_directory , '''generator_tokenizer''' )
        self.question_encoder.save_pretrained(question_encoder_path )
        self.generator.save_pretrained(generator_path )

    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop('''config''' , None )
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path )
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.generator , subfolder='''generator_tokenizer''' )
        return cls(question_encoder=question_encoder , generator=generator )

    def __call__( self , *args , **kwargs ):
        return self.current_tokenizer(*args , **kwargs )

    def batch_decode( self , *args , **kwargs ):
        return self.generator.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.generator.decode(*args , **kwargs )

    def _switch_to_input_mode( self ):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode( self ):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch( self , src_texts , tgt_texts = None , max_length = None , max_target_length = None , padding = "longest" , return_tensors = None , truncation = True , **kwargs , ) -> BatchEncoding:
        warnings.warn(
            '''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
            '''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
            '''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
            '''details''' , FutureWarning , )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts , add_special_tokens=True , return_tensors=return_tensors , max_length=max_length , padding=padding , truncation=truncation , **kwargs , )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts , add_special_tokens=True , return_tensors=return_tensors , padding=padding , max_length=max_target_length , truncation=truncation , **kwargs , )
        model_inputs['''labels'''] = labels['''input_ids''']
        return model_inputs
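# Minimal usage sketch (the model name is illustrative, not taken from this file):
#   tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#   inputs = tokenizer("who holds the record in 100m freestyle?", return_tensors="pt")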
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int] , max_sum: int ) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums )
    create_state_space_tree(nums , max_sum , num_index , path , result , remaining_nums_sum )
    return result


def create_state_space_tree(
    nums: list[int] ,
    max_sum: int ,
    num_index: int ,
    path: list[int] ,
    result: list[list[int]] ,
    remaining_nums_sum: int ,
) -> None:
    if sum(path ) > max_sum or (remaining_nums_sum + sum(path )) < max_sum:
        return
    if sum(path ) == max_sum:
        result.append(path )
        return
    for index in range(num_index , len(nums ) ):
        create_state_space_tree(
            nums , max_sum , index + 1 , [*path, nums[index]] , result , remaining_nums_sum - nums[index] , )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
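# Expected output for the inputs above (in the DFS order used here):
#   [3, 4, 2] [4, 5]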
'''simple docstring'''
import numpy as np


def sigmoid(vector: np.ndarray ) -> np.ndarray:
    return 1 / (1 + np.exp(-vector ))
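# Illustrative values (rounded): sigmoid(np.array([-1.0, 0.0, 1.0]))
# -> array([0.26894142, 0.5, 0.73105858])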
if __name__ == "__main__":
import doctest
doctest.testmod()
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1


def solution(n: int = 200_0000 ) -> int:
    return sum(takewhile(lambda x : x < n , prime_generator() ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
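# Illustrative check (small input): solution(10) == 2 + 3 + 5 + 7 == 17.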
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["""text""", """image""", """audio"""]
def create_inputs(input_types: List[str] ):
    '''simple docstring'''
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("""Text input""" )
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((512, 512) ) )
        elif input_type == "audio":
            inputs.append(torch.ones(3000 ) )
        elif isinstance(input_type , list ):
            inputs.append(create_inputs(input_type ) )
        else:
            raise ValueError(f"""Invalid type requested: {input_type}""" )
    return inputs


def output_types(outputs: List ):
    '''simple docstring'''
    output_types = []
    for output in outputs:
        if isinstance(output , (str, AgentText) ):
            output_types.append("""text""" )
        elif isinstance(output , (Image.Image, AgentImage) ):
            output_types.append("""image""" )
        elif isinstance(output , (torch.Tensor, AgentAudio) ):
            output_types.append("""audio""" )
        else:
            raise ValueError(f"""Invalid output: {output}""" )
    return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs( self ):
        """simple docstring"""
        self.assertTrue(hasattr(self.tool , """inputs""" ) )
        self.assertTrue(hasattr(self.tool , """outputs""" ) )
        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input , list ):
                for __input in _input:
                    self.assertTrue(__input in authorized_types )
            else:
                self.assertTrue(_input in authorized_types )
        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types )

    def test_call( self ):
        """simple docstring"""
        inputs = create_inputs(self.tool.inputs )
        outputs = self.tool(*inputs )
        # There is a single output
        if len(self.tool.outputs ) == 1:
            outputs = [outputs]
        self.assertListEqual(output_types(outputs ) , self.tool.outputs )

    def test_common_attributes( self ):
        """simple docstring"""
        self.assertTrue(hasattr(self.tool , """description""" ) )
        self.assertTrue(hasattr(self.tool , """default_checkpoint""" ) )
        self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )

    def test_agent_types_outputs( self ):
        """simple docstring"""
        inputs = create_inputs(self.tool.inputs )
        outputs = self.tool(*inputs )
        if not isinstance(outputs , list ):
            outputs = [outputs]
        self.assertEqual(len(outputs ) , len(self.tool.outputs ) )
        for output, output_type in zip(outputs , self.tool.outputs ):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output , agent_type ) )

    def test_agent_type_inputs( self ):
        """simple docstring"""
        inputs = create_inputs(self.tool.inputs )
        _inputs = []
        for _input, input_type in zip(inputs , self.tool.inputs ):
            if isinstance(input_type , list ):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
        # Should not raise an error
        outputs = self.tool(*_inputs )
        if not isinstance(outputs , list ):
            outputs = [outputs]
        self.assertEqual(len(outputs ) , len(self.tool.outputs ) )
"""simple docstring"""
def binary_xor(a: int , b: int ) -> str:
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''' )

    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"

    max_len = max(len(a_binary ) , len(b_binary ) )

    return "0b" + "".join(
        str(int(char_a != char_b ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
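# Example: binary_xor(25, 32) -> '0b111001' (011001 XOR 100000).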
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechTaTokenizerTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    tokenizer_class = SpeechTaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechTaTokenizer(SAMPLE_VOCAB )
        mask_token = AddedToken('''<mask>''' , lstrip=True , rstrip=False )
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({'''mask_token''': mask_token} )
        tokenizer.add_tokens(['''<ctc_blank>'''] )
        tokenizer.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self , _a ):
__a = '''this is a test'''
__a = '''this is a test'''
return input_text, output_text
    def get_clean_sequence( self , tokenizer , with_prefix_space=False , max_length=20 , min_length=5 ):
        input_text, output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
        return text, ids
    def test_convert_token_and_id( self ):
        token = '''<pad>'''
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , '''<s>''' )
        self.assertEqual(vocab_keys[1] , '''<pad>''' )
        self.assertEqual(vocab_keys[-4] , '''œ''' )
        self.assertEqual(vocab_keys[-2] , '''<mask>''' )
        self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''' )
        self.assertEqual(len(vocab_keys ) , 81 )

    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 79 )

    def test_add_tokens_tokenizer( self ):
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer )

                self.assertNotEqual(vocab_size , 0 )

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
                added_toks = tokenizer.add_tokens(new_toks )
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer )

                self.assertNotEqual(vocab_size_2 , 0 )
                self.assertEqual(vocab_size , vocab_size_2 )
                self.assertEqual(added_toks , len(new_toks ) )
                self.assertEqual(all_size_2 , all_size + len(new_toks ) )

                tokens = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=False )

                self.assertGreaterEqual(len(tokens ) , 4 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )

                new_toks_2 = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2 )
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer )

                self.assertNotEqual(vocab_size_3 , 0 )
                self.assertEqual(vocab_size , vocab_size_3 )
                self.assertEqual(added_toks_2 , len(new_toks_2 ) )
                self.assertEqual(all_size_3 , all_size_2 + len(new_toks_2 ) )

                tokens = tokenizer.encode(
                    '''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=False )

                self.assertGreaterEqual(len(tokens ) , 6 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[0] , tokens[1] )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokens[-4] )
                self.assertEqual(tokens[0] , tokenizer.eos_token_id )
                self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
__a = self.get_tokenizer()
__a = tokenizer.tokenize('''This is a test''' )
# fmt: off
self.assertListEqual(_a , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
__a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_a , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
__a = tokenizer.convert_tokens_to_ids(_a )
# fmt: off
self.assertListEqual(_a , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
__a = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
@slow
def __UpperCAmelCase ( self ):
# Use custom sequence because this tokenizer does not handle numbers.
__a = [
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
__a = {
'''input_ids''': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=_a , )
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys( config , base_model=False ):
    rename_keys = []
    for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
snake_case__ : List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''
        else:
            prefix = 'vit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ):
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None )


def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val


def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint( vit_name , pytorch_dump_folder_path ):
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-1_2:-1_0] )
        config.image_size = int(vit_name[-9:-6] )
    else:
        config.num_labels = 1_0_0_0
        repo_id = 'huggingface/label-files'
        filename = 'imagenet-1k-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4] )
        config.image_size = int(vit_name[-3:] )
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith('tiny' ):
            config.hidden_size = 1_9_2
            config.intermediate_size = 7_6_8
            config.num_hidden_layers = 1_2
            config.num_attention_heads = 3
        elif vit_name[9:].startswith('small' ):
            config.hidden_size = 3_8_4
            config.intermediate_size = 1_5_3_6
            config.num_hidden_layers = 1_2
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith('small' ):
            config.hidden_size = 7_6_8
            config.intermediate_size = 2_3_0_4
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith('base' ):
            pass
        elif vit_name[4:].startswith('large' ):
            config.hidden_size = 1_0_2_4
            config.intermediate_size = 4_0_9_6
            config.num_hidden_layers = 2_4
            config.num_attention_heads = 1_6
        elif vit_name[4:].startswith('huge' ):
            config.hidden_size = 1_2_8_0
            config.intermediate_size = 5_1_2_0
            config.num_hidden_layers = 3_2
            config.num_attention_heads = 1_6

    # load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True )
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config ).eval()
    else:
        model = ViTForImageClassification(config ).eval()
    model.load_state_dict(state_dict )

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size )
    else:
        image_processor = ViTImageProcessor(size=config.image_size )
    encoding = image_processor(images=prepare_img() , return_tensors='pt' )
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values )

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1e-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )

    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
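# Example invocation (the script name and output path are placeholders):
#   python convert_vit_timm_to_pytorch.py --vit_name vit_base_patch16_224 \
#       --pytorch_dump_folder_path ./vit-base-patch16-224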
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key( orig_key ):
    if "model" in orig_key:
        orig_key = orig_key.replace('model.' , '' )
    if "norm1" in orig_key:
        orig_key = orig_key.replace('norm1' , 'attention.output.LayerNorm' )
    if "norm2" in orig_key:
        orig_key = orig_key.replace('norm2' , 'output.LayerNorm' )
    if "norm" in orig_key:
        orig_key = orig_key.replace('norm' , 'LayerNorm' )
    if "transformer" in orig_key:
        layer_num = orig_key.split('.' )[0].split('_' )[-1]
        orig_key = orig_key.replace(f'''transformer_{layer_num}''' , f'''encoder.layer.{layer_num}''' )
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace('mha.attn' , 'attention.self' )
    if "mha" in orig_key:
        orig_key = orig_key.replace('mha' , 'attention' )
    if "W_q" in orig_key:
        orig_key = orig_key.replace('W_q' , 'self.query' )
    if "W_k" in orig_key:
        orig_key = orig_key.replace('W_k' , 'self.key' )
    if "W_v" in orig_key:
        orig_key = orig_key.replace('W_v' , 'self.value' )
    if "ff1" in orig_key:
        orig_key = orig_key.replace('ff1' , 'intermediate.dense' )
    if "ff2" in orig_key:
        orig_key = orig_key.replace('ff2' , 'output.dense' )
    if "ff" in orig_key:
        orig_key = orig_key.replace('ff' , 'output.dense' )
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace('mlm.mlm_class' , 'cls.predictions.decoder' )
    if "mlm" in orig_key:
        orig_key = orig_key.replace('mlm' , 'cls.predictions.transform' )
    if "cls" not in orig_key:
        orig_key = 'yoso.' + orig_key
    return orig_key


def convert_checkpoint_helper( max_position_embeddings , orig_state_dict ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key )] = val

    orig_state_dict['cls.predictions.bias'] = orig_state_dict['cls.predictions.decoder.bias']
    orig_state_dict['yoso.embeddings.position_ids'] = torch.arange(max_position_embeddings ).expand((1, -1) ) + 2
    return orig_state_dict


def convert_yoso_checkpoint( checkpoint_path , yoso_config_file , pytorch_dump_path ):
    orig_state_dict = torch.load(checkpoint_path , map_location='cpu' )['model_state_dict']
    config = YosoConfig.from_json_file(yoso_config_file )
    model = YosoForMaskedLM(config )

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings , orig_state_dict )

    print(model.load_state_dict(new_state_dict ) )
    model.eval()
    model.save_pretrained(pytorch_dump_path )
    print(f'''Checkpoint successfuly converted. Model saved at {pytorch_dump_path}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
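# Example invocation (paths are placeholders):
#   python convert_yoso_checkpoint.py --pytorch_model_path yoso.ckpt \
#       --config_file yoso_config.json --pytorch_dump_path ./yoso-converted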
import argparse
import collections

import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging

logging.set_verbosity_info()
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k = params[F'''{prefix}/layers_{i}/{layer_name}/key/kernel''']
    o = params[F'''{prefix}/layers_{i}/{layer_name}/out/kernel''']
    q = params[F'''{prefix}/layers_{i}/{layer_name}/query/kernel''']
    v = params[F'''{prefix}/layers_{i}/{layer_name}/value/kernel''']
    return k, o, q, v
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[F'''{prefix}/layers_{i}/mlp/wi_0/kernel''']
        wi_1 = params[F'''{prefix}/layers_{i}/mlp/wi_1/kernel''']
        wi = (wi_0, wi_1)
    else:
        wi = params[F'''{prefix}/layers_{i}/mlp/wi/kernel''']
    wo = params[F'''{prefix}/layers_{i}/mlp/wo/kernel''']
    return wi, wo
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[F'''{prefix}/layers_{i}/{layer_name}/scale''']
def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables['''target'''] )
    old = {'''/'''.join(k ): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = '''encoder/layers_0/mlp/wi_0/kernel''' in old
    print('''Split MLP:''', split_mlp_wi )

    new = collections.OrderedDict()

    # Shared embeddings.
    new['''shared.weight'''] = old['''token_embedder/embedding''']

    # Encoder.
    for i in range(num_layers ):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, '''encoder''', '''pre_attention_layer_norm''' )
        k, o, q, v = t5x_attention_lookup(old, i, '''encoder''', '''attention''' )
        new[F'''encoder.block.{i}.layer.0.layer_norm.weight'''] = layer_norm
        new[F'''encoder.block.{i}.layer.0.SelfAttention.k.weight'''] = k.T
        new[F'''encoder.block.{i}.layer.0.SelfAttention.o.weight'''] = o.T
        new[F'''encoder.block.{i}.layer.0.SelfAttention.q.weight'''] = q.T
        new[F'''encoder.block.{i}.layer.0.SelfAttention.v.weight'''] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, '''encoder''', '''pre_mlp_layer_norm''' )
        wi, wo = t5x_mlp_lookup(old, i, '''encoder''', split_mlp_wi )
        new[F'''encoder.block.{i}.layer.1.layer_norm.weight'''] = layer_norm
        if split_mlp_wi:
            new[F'''encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight'''] = wi[0].T
            new[F'''encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight'''] = wi[1].T
        else:
            new[F'''encoder.block.{i}.layer.1.DenseReluDense.wi.weight'''] = wi.T
        new[F'''encoder.block.{i}.layer.1.DenseReluDense.wo.weight'''] = wo.T

    new['''encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'''] = old[
        '''encoder/relpos_bias/rel_embedding'''
    ].T
    new['''encoder.final_layer_norm.weight'''] = old['''encoder/encoder_norm/scale''']

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers ):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, '''decoder''', '''pre_self_attention_layer_norm''' )
            k, o, q, v = t5x_attention_lookup(old, i, '''decoder''', '''self_attention''' )
            new[F'''decoder.block.{i}.layer.0.layer_norm.weight'''] = layer_norm
            new[F'''decoder.block.{i}.layer.0.SelfAttention.k.weight'''] = k.T
            new[F'''decoder.block.{i}.layer.0.SelfAttention.o.weight'''] = o.T
            new[F'''decoder.block.{i}.layer.0.SelfAttention.q.weight'''] = q.T
            new[F'''decoder.block.{i}.layer.0.SelfAttention.v.weight'''] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, '''decoder''', '''pre_cross_attention_layer_norm''' )
            k, o, q, v = t5x_attention_lookup(old, i, '''decoder''', '''encoder_decoder_attention''' )
            new[F'''decoder.block.{i}.layer.1.layer_norm.weight'''] = layer_norm
            new[F'''decoder.block.{i}.layer.1.EncDecAttention.k.weight'''] = k.T
            new[F'''decoder.block.{i}.layer.1.EncDecAttention.o.weight'''] = o.T
            new[F'''decoder.block.{i}.layer.1.EncDecAttention.q.weight'''] = q.T
            new[F'''decoder.block.{i}.layer.1.EncDecAttention.v.weight'''] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, '''decoder''', '''pre_mlp_layer_norm''' )
            wi, wo = t5x_mlp_lookup(old, i, '''decoder''', split_mlp_wi )
            new[F'''decoder.block.{i}.layer.2.layer_norm.weight'''] = layer_norm
            if split_mlp_wi:
                new[F'''decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight'''] = wi[0].T
                new[F'''decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight'''] = wi[1].T
            else:
                new[F'''decoder.block.{i}.layer.2.DenseReluDense.wi.weight'''] = wi.T
            new[F'''decoder.block.{i}.layer.2.DenseReluDense.wo.weight'''] = wo.T

        new['''decoder.final_layer_norm.weight'''] = old['''decoder/decoder_norm/scale''']
        new['''decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'''] = old[
            '''decoder/relpos_bias/rel_embedding'''
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new['''lm_head.weight'''] = old['''decoder/logits_dense/kernel'''].T

    return new
def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict['''encoder.embed_tokens.weight'''] = state_dict['''shared.weight''']

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict['''decoder.embed_tokens.weight'''] = state_dict['''shared.weight''']

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print('''Using shared word embeddings as lm_head.''' )
            state_dict['''lm_head.weight'''] = state_dict['''shared.weight''']

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path )
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only )
    state_dict = make_state_dict(converted, is_encoder_only )
    model.load_state_dict(state_dict, strict=True )
def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only = False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = T5Config.from_json_file(config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config )
    else:
        model = T5ForConditionalGeneration(config )

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only )

    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(pytorch_dump_path )

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print('''Done''' )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
lowercase_ = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
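
# For reference, a typical invocation of this script looks like the following
# (a minimal sketch; the checkpoint and output paths are hypothetical and the
# flags are taken from the argparse definitions above):
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_dump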
from __future__ import annotations
class Node:
    """A Node has a data field and pointers to the Nodes to its left and right."""

    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    """Prints every node's data using an in-order traversal."""
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    """Recursively computes the depth: one more than the deeper of the two subtrees."""
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node | None) -> bool:
    """Returns True if every node has either zero or two children."""
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    # The exact attachment points of the original test tree were lost in
    # extraction; this rebuilds a representative nine-node tree.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)


if __name__ == "__main__":
    main()
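
# For contrast, a minimal sketch of a tree that is *not* full: a node with
# exactly one child fails the check.
#
#   lopsided = Node(1)
#   lopsided.left = Node(2)
#   is_full_binary_tree(lopsided)  # False: node 1 has a left child but no right child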
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    r"""
    Constructs a MusicGen processor which wraps an EnCodec feature extractor and a T5 tokenizer into a single
    processor.
    """

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # When used as a target-processor context manager, delegate everything
        # to the current processor.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)

        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Decodes batches of audio outputs from the model, or falls back to the tokenizer for token ids."""
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)

        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode` method."""
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask=None) -> List[np.ndarray]:
        """Strips any padding from the generated audio values back to a list of numpy arrays."""
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
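
# A minimal usage sketch (the checkpoint name is illustrative; any MusicGen
# checkpoint that ships this processor should work the same way):
#
#   processor = MusicgenProcessor.from_pretrained("facebook/musicgen-small")
#   inputs = processor(text=["80s pop track with bassy drums"], padding=True, return_tensors="pt")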
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
        MODEL_MAPPING,
        LevitForImageClassification,
        LevitForImageClassificationWithTeacher,
        LevitModel,
    )
    from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class LevitModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        # Each of the four initial conv layers halves the spatial resolution.
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on a COCO test image.
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
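
# Slow tests are skipped by default; running the integration test locally
# typically looks like this (a sketch, the exact path depends on your checkout):
#
#   RUN_SLOW=1 pytest tests/models/levit/test_modeling_levit.py -k Integration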
def text_justification(word: str, max_width: int) -> list:
    """
    Format the string so that each line has exactly max_width characters and is
    fully (left and right) justified, and return the list of justified lines.

    >>> text_justification("This is an example of text justification.", 16)
    ['This    is    an', 'example  of text', 'justification.  ']
    """
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in the line,
            # just insert overall_spaces_count for the remainder of the line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(inner_word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(inner_word)
            width += len(inner_word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [inner_word], len(inner_word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """
    Utility class for CLIP embeddings. Used to combine the image and text embeddings into a format usable by the
    decoder.
    """

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
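
# A minimal shape-check sketch (the dimensions below are illustrative, not tied
# to any particular checkpoint):
#
#   proj = UnCLIPTextProjModel(
#       clip_extra_context_tokens=4, clip_embeddings_dim=768, time_embed_dim=1536, cross_attention_dim=2048
#   )
#   hidden, time_emb = proj(
#       image_embeddings=torch.randn(2, 768),
#       prompt_embeds=torch.randn(2, 768),
#       text_encoder_hidden_states=torch.randn(2, 77, 768),
#       do_classifier_free_guidance=False,
#   )
#   # hidden: (2, 4 + 77, 2048), time_emb: (2, 1536)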
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)

        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)

    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)

    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)
    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))
    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
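
# A minimal sketch of writing a custom hook with this API (the scaling factor
# is arbitrary, purely for illustration):
#
#   class ScaleOutputHook(ModelHook):
#       def post_forward(self, module, output):
#           return output * 0.5
#
#   model = ModelForTest()
#   add_hook_to_module(model, ScaleOutputHook())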
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    r"""Constructs a FLAVA processor which wraps a FLAVA image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        """Uses the image processor to prepare images and the tokenizer to prepare text for the model."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode` method."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode` method."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
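
# A minimal usage sketch ("facebook/flava-full" is the reference checkpoint;
# the inputs below are illustrative):
#
#   processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)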
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    r"""
    Constructs a Pix2Struct processor which wraps a Pix2Struct image processor and a T5 tokenizer into a single
    processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images=None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_patches: Optional[int] = 2048,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Uses the image processor to prepare images and the tokenizer to prepare text for the model."""
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
            )
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
            )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode` method."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode` method."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
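
# A minimal usage sketch (the checkpoint name is illustrative; any Pix2Struct
# checkpoint shipping this processor works the same way):
#
#   processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
#   inputs = processor(images=image, return_tensors="pt")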
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
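
# Typical CLI usage (the config path in the second line is illustrative):
#
#   accelerate config
#   accelerate config --config_file ~/.cache/huggingface/accelerate/default_config.yaml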
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "pytorch",
            "script": "run_ddp.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf_dist.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the
    # "dataset_name" field even if it's deprecated. This way old versions of `datasets` can still reload
    # dataset_infos.json files.
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
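
# For reference, one entry of the YAML list round-tripped above looks roughly
# like this (an illustrative sketch, not an exact dump):
#
#   - name: train
#     num_bytes: 1337
#     num_examples: 42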
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])

    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
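# A minimal round-trip sketch of the class above. The metadata keys used here
# (`language`, `license`) are illustrative; any YAML-serializable keys work.
def _demo_metadata_roundtrip() -> bool:
    metadata = DatasetMetadata(language=["en"], license=["mit"], train_eval_index=[])
    yaml_block = metadata.to_yaml_string()  # the dashed key `train-eval-index` is restored on dump
    return DatasetMetadata.from_yaml_string(yaml_block) == metadata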
known_task_templates = {
'image-classification': [],
'translation': [],
'image-segmentation': [],
'fill-mask': [],
'automatic-speech-recognition': [],
'token-classification': [],
'sentence-similarity': [],
'audio-classification': [],
'question-answering': [],
'summarization': [],
'zero-shot-classification': [],
'table-to-text': [],
'feature-extraction': [],
'other': [],
'multiple-choice': [],
'text-classification': [],
'text-to-image': [],
'text2text-generation': [],
'zero-shot-image-classification': [],
'tabular-classification': [],
'tabular-regression': [],
'image-to-image': [],
'tabular-to-text': [],
'unconditional-image-generation': [],
'text-retrieval': [],
'text-to-speech': [],
'object-detection': [],
'audio-to-audio': [],
'text-generation': [],
'conversational': [],
'table-question-answering': [],
'visual-question-answering': [],
'image-to-text': [],
'reinforcement-learning': [],
'voice-activity-detection': [],
'time-series-forecasting': [],
'document-question-answering': [],
}
if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
| 113
| 1
|
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for the ForPreTraining model: it also needs sentence-order labels
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
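# Standalone sketch of the integration check above; copy it into its own script,
# since this test module uses package-relative imports (downloading the
# `albert-base-v2` checkpoint is assumed to be possible):
if __name__ == "__main__":
    import torch
    from transformers import AlbertModel

    model = AlbertModel.from_pretrained("albert-base-v2")
    input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
    with torch.no_grad():
        output = model(input_ids)[0]
    print(output.shape)  # torch.Size([1, 11, 768])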
| 144
|
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
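# Sketch of the Funnel-specific behavior test_token_type_ids checks: the leading
# <cls> token gets token type id 2 (not 0, as in BERT-style tokenizers). Copy
# into a standalone script; the `funnel-transformer/small` checkpoint is an
# assumption here.
if __name__ == "__main__":
    from transformers import FunnelTokenizer as _FunnelTokenizer

    tok = _FunnelTokenizer.from_pretrained("funnel-transformer/small")
    enc = tok("hello world", "goodbye world")
    print(enc["token_type_ids"])  # starts with 2, then 0s for sentence A and 1s for sentence B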
| 154
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__snake_case : Optional[int] = logging.get_logger(__name__)
__snake_case : str = {
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
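# Quick sanity sketch for the config above; copy into a standalone script, since
# this module uses package-relative imports. With the defaults, hidden_size is
# derived as embed_dim * 2 ** (num_stages - 1) = 96 * 8 = 768.
if __name__ == "__main__":
    from transformers import SwinConfig as _SwinConfig

    cfg = _SwinConfig()
    print(cfg.hidden_size)  # 768
    print(cfg.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']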
| 691
|
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
        hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
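# Usage sketch: build a small weighted graph (0-based vertex ids, 1-based
# endpoints for `connect`) and compute its minimum spanning tree both ways.
if __name__ == "__main__":
    demo_graph = [Vertex(n) for n in range(4)]
    connect(demo_graph, 1, 2, 1)
    connect(demo_graph, 2, 3, 2)
    connect(demo_graph, 3, 4, 1)
    connect(demo_graph, 1, 4, 5)
    print(prim(demo_graph[:], demo_graph[0]))          # [(2, 1), (3, 2), (4, 3)]
    print(list(prim_heap(demo_graph, demo_graph[0])))  # same edges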
| 691
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 67
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
lowerCAmelCase__ = TypeVar('''T''')
class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """Linked-list-based stack."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
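# Usage sketch for the linked stack above.
if __name__ == "__main__":
    stack: LinkedStack[int] = LinkedStack()
    for n in (1, 2, 3):
        stack.push(n)
    print(stack)         # 3->2->1
    print(stack.pop())   # 3
    print(stack.peek())  # 2
    print(len(stack))    # 2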
| 83
| 0
|
import torch
from diffusers import DiffusionPipeline
class CustomLocalPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample

        # the result is deliberately deterministic: a tensor of ones with the sample's shape
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)

        return result
| 362
|
import os
def solution(filename: str = "matrix.txt") -> int:
    """
    Returns the minimal path sum in the grid read from `filename`, moving only
    right and down.
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    n = len(grid[0])
    dp = [[0 for i in range(n)] for j in range(n)]
    dp[0][0] = grid[0][0]
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])

    return dp[-1][-1]
if __name__ == "__main__":
print(F"""{solution() = }""")
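# The same dynamic programme on an inline 3x3 grid, so the recurrence can be
# checked by hand without matrix.txt: the cheapest right/down path through
# [[1, 2, 3], [4, 5, 6], [7, 8, 9]] is 1 + 2 + 3 + 6 + 9 = 21.
def min_path_sum(grid: list[list[int]]) -> int:
    n = len(grid[0])
    dp = [[0] * n for _ in range(n)]
    dp[0][0] = grid[0][0]
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])
    return dp[-1][-1]


assert min_path_sum([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) == 21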
| 362
| 1
|
from math import factorial
def solution(num: int = 100) -> int:
    """Returns the sum of the digits in the number num!"""
    return sum(map(int, str(factorial(num))))
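# Hand-checkable example: 10! = 3628800, whose digits sum to 3+6+2+8+8+0+0 = 27.
assert solution(10) == 27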
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 106
|
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    """Output class for the scheduler's `step` function."""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    order = 1
    ignore_for_config = ["kwargs"]
    _deprecated_kwargs = ["set_alpha_to_one"]

@register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
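# Sketch of driving the inverse scheduler. A trained UNet would replace the zero
# "noise prediction" stand-in; the shapes and step count here are arbitrary.
if __name__ == "__main__":
    scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(50)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        noise_pred = torch.zeros_like(sample)  # stand-in for unet(sample, t).sample
        sample = scheduler.step(noise_pred, int(t), sample).prev_sample
    print(sample.shape)  # torch.Size([1, 3, 8, 8])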
| 493
| 0
|
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 240
|
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
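# Hand-checkable example: det([[4, 7], [2, 6]]) = 10, so the inverse is
# (1/10) * [[6, -7], [-2, 4]] = [[0.6, -0.7], [-0.2, 0.4]].
if __name__ == "__main__":
    print(inverse_of_matrix([[4.0, 7.0], [2.0, 6.0]]))  # [[0.6, -0.7], [-0.2, 0.4]]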
| 240
| 1
|
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
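# These entry points are what `torch.hub` resolves; usage sketch (network access
# and this repository layout are assumptions here):
if __name__ == "__main__":
    import torch

    hub_tokenizer = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
    print(hub_tokenizer.tokenize("hello world"))  # ['hello', 'world']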
| 38
|
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                #  ^ unk: 2 + 1 = 3                                       unk: 2 + 1 = 3  ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
snake_case__ : int = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
snake_case__ : List[str] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertEqual((2, 1_4) , batch.input_ids.shape )
self.assertEqual((2, 1_4) , batch.attention_mask.shape )
snake_case__ : Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __SCREAMING_SNAKE_CASE )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.tokenizer(self.src_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=3 , return_tensors="""pt""" )
snake_case__ : Optional[int] = self.tokenizer(
text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=1_0 , return_tensors="""pt""" )
snake_case__ : str = targets["""input_ids"""]
snake_case__ : Optional[Any] = shift_tokens_right(__SCREAMING_SNAKE_CASE , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def __UpperCamelCase ( self ):
snake_case__ : Tuple = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE ) , {
# A, test, EOS, en_XX
"""input_ids""": [[6_2, 3_0_3_4, 2, 2_5_0_0_0_4]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 2_5_0_0_0_1,
} , )
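For context, the translation-input behaviour exercised by the last test corresponds to this user-facing call. This is a usage sketch, not part of the test file; it downloads the public facebook/mbart-large-en-ro checkpoint and needs sentencepiece installed.

from transformers import MBartTokenizer

tok = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
enc = tok("A test", return_tensors="pt")
# MBart appends EOS followed by the source language code as a suffix:
print(enc.input_ids[0][-1].item())  # 250004 (en_XX)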
"""Neville's method for polynomial interpolation."""


def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """
    Interpolate and evaluate the polynomial through (x_points, y_points) at x0.
    Returns the interpolated value and the full Neville table.

    >>> neville_interpolate((1, 2, 3, 4, 6), (6, 7, 8, 9, 11), 5)[0]
    10.0
    """
    n = len(x_points)
    q = [[0] * n for _ in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
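A quick usage sketch (the sample points lie on y = x + 5, so the interpolation at x = 5 is exact):

value, _table = neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)
print(value)  # 10.0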
"""Minimum cut of a flow network (Ford-Fulkerson with BFS augmenting paths)."""

test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent):
    # Return True if there is a node that has not been iterated.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def mincut(graph, source, sink):
    """Returns the edges of the minimum cut found by saturating augmenting paths."""
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities (copy).

    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum residual capacity along the selected path.
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))

    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
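By max-flow/min-cut duality, the original capacities of the saturated edges reported above should sum to the maximum flow; a small check for this particular graph (the helper is hypothetical, not part of the original script):

def cut_capacity(graph, cut_edges):
    # Sum the original capacities of the edges reported as cut.
    return sum(graph[u][v] for u, v in cut_edges)

edges = mincut([row[:] for row in test_graph], source=0, sink=5)  # pass a copy; mincut mutates its argument
print(edges)                            # [(1, 3), (4, 3), (4, 5)]
print(cut_capacity(test_graph, edges))  # 23, the max flow of this network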
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
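As a checkpoint-free sanity sketch of the script's last two steps (config -> model -> save), the following uses a deliberately tiny, made-up configuration; no TensorFlow checkpoint is involved:

import tempfile

from transformers import BigBirdConfig, BigBirdForPreTraining

tiny_config = BigBirdConfig(num_hidden_layers=2, hidden_size=64, num_attention_heads=2, intermediate_size=128)
tiny_model = BigBirdForPreTraining(tiny_config)
tiny_model.save_pretrained(tempfile.mkdtemp())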
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")
if __name__ == "__main__":
main()
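A quick randomized cross-check of both implementations against the standard library:

import math
import random

for _ in range(100):
    a, b = random.randint(1, 10**6), random.randint(1, 10**6)
    assert euclidean_gcd(a, b) == euclidean_gcd_recursive(a, b) == math.gcd(a, b)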
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["polics", "health"]
        )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]

    def run_pipeline_test(self, classifier, _):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")

        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")

        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")

        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)

        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )

        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)

    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)

    @require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        # There was a regression in 4.10 for this.
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"]
        )

    @require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )

    @require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="tf",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )

        outputs = zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data.",
            candidate_labels=["machine learning", "statistics", "translation", "vision"],
            multi_label=True,
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.817, 0.713, 0.018, 0.018],
            },
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )

        outputs = zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data.",
            candidate_labels=["machine learning", "statistics", "translation", "vision"],
            multi_label=True,
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.817, 0.713, 0.018, 0.018],
            },
        )
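Outside the test harness, the pipeline exercised above is used like this. A usage sketch only: it downloads the public roberta-large-mnli checkpoint, and the exact scores will vary.

from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
result = classifier(
    "Who are you voting for in 2020?",
    candidate_labels=["politics", "public health", "science"],
)
print(result["labels"][0], round(result["scores"][0], 3))  # e.g. politics 0.976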
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
"""TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimesformerModel""",
"""TimesformerForVideoClassification""",
"""TimesformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
'VIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTForImageClassification',
'ViTForMaskedImageModeling',
'ViTModel',
'ViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
'TFViTForImageClassification',
'TFViTModel',
'TFViTPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
'FlaxViTForImageClassification',
'FlaxViTModel',
'FlaxViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
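Both of the import files above rely on the _LazyModule pattern, which defers heavy framework imports until an attribute is first touched. A minimal standalone sketch of the idea follows; it is simplified, not the actual Transformers implementation, and the demo modules are stdlib stand-ins:

import importlib
import types


class LazyModule(types.ModuleType):
    """Resolve attributes to objects in other modules on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the (absolute) module that defines it.
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        module = importlib.import_module(self._attr_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value


# Demo with standard-library modules standing in for heavy submodules:
lazy = LazyModule("demo", {"json": ["dumps"], "math": ["sqrt"]})
print(lazy.sqrt(2))          # math is imported only here
print(lazy.dumps({"a": 1}))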
"""simple docstring"""
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
    warnings = None
try:
import msvcrt
except ImportError:
    msvcrt = None
try:
import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"


_logger = None


def logger():
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    """Context-manager proxy returned by :meth:`BaseFileLock.acquire`."""

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None


class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self.timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path


class WindowsFileLock(BaseFileLock):
    """Uses msvcrt.locking() to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None


class UnixFileLock(BaseFileLock):
    """Uses fcntl.flock() to hard lock the lock file on unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        #   https://github.com/benediktschmitt/py-filelock/issues/31
        #   https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None


class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
if warnings is not None:
warnings.warn('only soft file lock is available')
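Typical usage of whichever lock class was selected above, as a sketch; the target path is made up:

import tempfile

target = os.path.join(tempfile.gettempdir(), "example.txt")
lock = FileLock(target + ".lock", timeout=5)
try:
    with lock:
        # Critical section: only one process at a time gets here.
        with open(target, "a") as f:
            f.write("exclusive write\n")
except Timeout:
    print("another process is holding the lock")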
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge the stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
if __name__ == "__main__":
__snake_case = input("""Enter numbers separated by a comma:\n""").strip()
__snake_case = [int(item) for item in user_input.split(""",""")]
print(patience_sort(unsorted))
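A quick property check against the built-in sort (illustrative only):

import random

data = [random.randint(0, 1000) for _ in range(500)]
assert patience_sort(list(data)) == sorted(data)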
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"""text-classification""",
"""language-modeling""",
"""summarization""",
"""token-classification""",
"""question-answering""",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --eval_steps=2
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_clm_flax.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)

    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_summarization.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --test_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=8
            --do_train
            --do_eval
            --do_predict
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            """.split()

        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)

    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --logging_steps 2 --eval_steps 2
            --do_train
            --do_eval
            --num_train_epochs=1
            """.split()

        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)

    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_t5_mlm_flax.py
            --model_name_or_path t5-small
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)

    @slow
    def test_run_ner(self):
        # with so little data, distributed training needs more epochs to reach the single-GPU score
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_flax_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --logging_steps 2 --eval_steps 2
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)

    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_qa.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=2
            --do_train
            --do_eval
            --logging_steps 2 --eval_steps 2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            """.split()

        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
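For reference, get_results just reads the <split>_results.json metrics file that the example scripts write; a self-contained sketch with a fabricated metrics file:

import tempfile

tmp = tempfile.mkdtemp()
with open(os.path.join(tmp, "eval_results.json"), "w") as f:
    json.dump({"eval_accuracy": 0.81}, f)
print(get_results(tmp)["eval_accuracy"])  # 0.81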
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
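A minimal instantiation sketch showing the MRA-specific knobs (the values are illustrative, not recommended settings):

config = MraConfig(num_hidden_layers=2, block_per_row=2)
print(config.approx_mode, config.block_per_row)  # full 2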
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
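Because the deprecation warning fires in __init__, it is easy to observe directly; a small sketch, assuming the vision extras needed by CLIPImageProcessor are installed:

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    CLIPFeatureExtractor()  # default arguments; we only care about the warning
print(caught[0].category.__name__)  # FutureWarning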
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE


def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config",
        type=str,
        default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer",
        type=str,
        default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )

    parser.add_argument(
        "--per_replica_batch_size",
        type=int,
        default=8,
        help="Batch size per TPU core.",
    )

    parser.add_argument(
        "--no_tpu",
        action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )

    parser.add_argument(
        "--tpu_name",
        type=str,
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
        default="local",
    )

    parser.add_argument(
        "--tpu_zone",
        type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )

    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )

    parser.add_argument(
        "--bfloat16",
        action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )

    parser.add_argument(
        "--train_dataset",
        type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )

    parser.add_argument(
        "--shuffle_buffer_size",
        type=int,
        default=2**18,
        help="Size of the shuffle buffer (in samples)",
    )

    parser.add_argument(
        "--eval_dataset",
        type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )

    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of epochs to train for.",
    )

    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Learning rate to use for training.",
    )

    parser.add_argument(
        "--weight_decay_rate",
        type=float,
        default=1e-3,
        help="Weight decay rate to use for training.",
    )

    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )

    parser.add_argument(
        "--mlm_probability",
        type=float,
        default=0.15,
        help="Fraction of tokens to mask during training.",
    )

    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")

    args = parser.parse_args()
    return args


def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)

    return tpu


def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples


def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(args.shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset


def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")

    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)


if __name__ == "__main__":
    args = parse_args()
    main(args)
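The shard-size convention count_samples relies on is that every file name ends in "-<shard>-<num_samples>.tfrecord"; a quick check with made-up shard names:

files = [
    "gs://my-bucket/wiki-train-0-65536.tfrecord",  # hypothetical shard names
    "gs://my-bucket/wiki-train-1-40000.tfrecord",
]
print(count_samples(files))  # 105536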
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
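A small sketch of the adapter-language bookkeeping the config stores (the language codes here are illustrative):

config = XmodConfig(num_hidden_layers=2, languages=("en_XX", "de_DE"), default_language="en_XX")
print(config.languages, config.default_language)  # ['en_XX', 'de_DE'] en_XX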
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'transformer.blocks.{i}.norm1.weight', f'vilt.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'transformer.blocks.{i}.norm1.bias', f'vilt.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'transformer.blocks.{i}.attn.proj.weight', f'vilt.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'transformer.blocks.{i}.attn.proj.bias', f'vilt.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'transformer.blocks.{i}.norm2.weight', f'vilt.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'transformer.blocks.{i}.norm2.bias', f'vilt.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(f'transformer.blocks.{i}.mlp.fc1.weight', f'vilt.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'transformer.blocks.{i}.mlp.fc1.bias', f'vilt.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'transformer.blocks.{i}.mlp.fc2.weight', f'vilt.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'transformer.blocks.{i}.mlp.fc2.bias', f'vilt.encoder.layer.{i}.output.dense.bias') )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def snake_case_ (UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
_a = '''vilt.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_a = state_dict.pop(f'transformer.blocks.{i}.attn.qkv.weight' )
_a = state_dict.pop(f'transformer.blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_a = in_proj_weight[
: config.hidden_size, :
]
_a = in_proj_bias[: config.hidden_size]
_a = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_a = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_a = in_proj_weight[
-config.hidden_size :, :
]
_a = in_proj_bias[-config.hidden_size :]
def snake_case_ (UpperCamelCase : Optional[int] ):
'''simple docstring'''
_a = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(UpperCamelCase , UpperCamelCase )
def snake_case_ (UpperCamelCase : int , UpperCamelCase : Tuple , UpperCamelCase : int ):
'''simple docstring'''
_a = dct.pop(UpperCamelCase )
_a = val
@torch.no_grad()
def snake_case_ (UpperCamelCase : Dict , UpperCamelCase : int ):
'''simple docstring'''
_a = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=UpperCamelCase )
_a = False
_a = False
_a = False
_a = False
if "vqa" in checkpoint_url:
_a = True
_a = 3129
_a = '''huggingface/label-files'''
_a = '''vqa2-id2label.json'''
_a = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type='''dataset''' ) , '''r''' ) )
        _a = {int(k ): v for k, v in idalabel.items()}
_a = idalabel
_a = {v: k for k, v in idalabel.items()}
_a = ViltForQuestionAnswering(UpperCamelCase )
elif "nlvr" in checkpoint_url:
_a = True
_a = 2
_a = {0: '''False''', 1: '''True'''}
_a = {v: k for k, v in config.idalabel.items()}
_a = 3
_a = ViltForImagesAndTextClassification(UpperCamelCase )
elif "irtr" in checkpoint_url:
_a = True
_a = ViltForImageAndTextRetrieval(UpperCamelCase )
elif "mlm_itm" in checkpoint_url:
_a = True
_a = ViltForMaskedLM(UpperCamelCase )
else:
raise ValueError('''Unknown model type''' )
# load state_dict of original model, remove and rename some keys
_a = torch.hub.load_state_dict_from_url(UpperCamelCase , map_location='''cpu''' )['''state_dict''']
_a = create_rename_keys(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
for src, dest in rename_keys:
rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase )
read_in_q_k_v(UpperCamelCase , UpperCamelCase )
if mlm_model or irtr_model:
_a = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
for k in ignore_keys:
state_dict.pop(UpperCamelCase , UpperCamelCase )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
_a , _a = model.load_state_dict(UpperCamelCase , strict=UpperCamelCase )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(UpperCamelCase )
# Define processor
_a = ViltImageProcessor(size=384 )
_a = BertTokenizer.from_pretrained('''bert-base-uncased''' )
_a = ViltProcessor(UpperCamelCase , UpperCamelCase )
# Forward pass on example inputs (image + text)
if nlvr_model:
_a = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=UpperCamelCase ).raw )
_a = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=UpperCamelCase ).raw )
_a = (
'''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
''' standing.'''
)
_a = processor(UpperCamelCase , UpperCamelCase , return_tensors='''pt''' )
_a = processor(UpperCamelCase , UpperCamelCase , return_tensors='''pt''' )
_a = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
_a = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=UpperCamelCase ).raw )
if mlm_model:
_a = '''a bunch of [MASK] laying on a [MASK].'''
else:
_a = '''How many cats are there?'''
_a = processor(UpperCamelCase , UpperCamelCase , return_tensors='''pt''' )
_a = model(**UpperCamelCase )
# Verify outputs
if mlm_model:
_a = torch.Size([1, 11, 3_0522] )
_a = torch.tensor([-12.5061, -12.5123, -12.5174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , UpperCamelCase , atol=1e-4 )
# verify masked token prediction equals "cats"
_a = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
_a = torch.Size([1, 3129] )
_a = torch.tensor([-15.9495, -18.1472, -10.3041] )
        assert torch.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1e-4 )
        assert outputs.logits.shape == expected_shape
# verify vqa prediction equals "2"
_a = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
_a = torch.Size([1, 2] )
_a = torch.tensor([-2.8721, 2.1291] )
assert torch.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1e-4 )
assert outputs.logits.shape == expected_shape
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
print(f'Saving model and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(UpperCamelCase )
processor.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
_snake_case : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
_snake_case : List[str] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
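# A hedged usage sketch: with the default checkpoint URL the script above converts
# an MLM+ITM checkpoint into a ViltForMaskedLM; "./vilt-mlm" below is a
# hypothetical stand-in for whatever was passed as --pytorch_dump_folder_path.
#
#   from transformers import ViltForMaskedLM, ViltProcessor
#   processor = ViltProcessor.from_pretrained("./vilt-mlm")
#   model = ViltForMaskedLM.from_pretrained("./vilt-mlm")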
| 22
|
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __a ( __UpperCAmelCase : Dict , __UpperCAmelCase : Dict , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
with open(__UpperCAmelCase ) as metadata_file:
lowerCamelCase_ : int = json.load(__UpperCAmelCase )
lowerCamelCase_ : Optional[Any] = LukeConfig(use_entity_aware_attention=__UpperCAmelCase , **metadata["model_config"] )
# Load in the weights from the checkpoint_path
lowerCamelCase_ : Optional[Any] = torch.load(__UpperCAmelCase , map_location="cpu" )
# Load the entity vocab file
lowerCamelCase_ : Dict = load_entity_vocab(__UpperCAmelCase )
lowerCamelCase_ : Union[str, Any] = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
lowerCamelCase_ : str = AddedToken("<ent>" , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase )
lowerCamelCase_ : Optional[int] = AddedToken("<ent2>" , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(__UpperCAmelCase )
with open(os.path.join(__UpperCAmelCase , LukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
lowerCamelCase_ : Union[str, Any] = LukeTokenizer.from_pretrained(__UpperCAmelCase )
# Initialize the embeddings of the special tokens
lowerCamelCase_ : Tuple = state_dict["embeddings.word_embeddings.weight"]
lowerCamelCase_ : Any = word_emb[tokenizer.convert_tokens_to_ids(["@"] )[0]].unsqueeze(0 )
lowerCamelCase_ : Dict = word_emb[tokenizer.convert_tokens_to_ids(["#"] )[0]].unsqueeze(0 )
lowerCamelCase_ : List[Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
lowerCamelCase_ : Optional[int] = f"encoder.layer.{layer_index}.attention.self."
lowerCamelCase_ : Union[str, Any] = state_dict[prefix + matrix_name]
lowerCamelCase_ : int = state_dict[prefix + matrix_name]
lowerCamelCase_ : Optional[int] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
lowerCamelCase_ : Union[str, Any] = state_dict["entity_embeddings.entity_embeddings.weight"]
lowerCamelCase_ : Any = entity_emb[entity_vocab["[MASK]"]]
lowerCamelCase_ : Tuple = LukeModel(config=__UpperCAmelCase ).eval()
lowerCamelCase_ , lowerCamelCase_ : Optional[Any] = model.load_state_dict(__UpperCAmelCase , strict=__UpperCAmelCase )
if not (len(__UpperCAmelCase ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(f"Missing keys {', '.join(__UpperCAmelCase )}. Expected only missing embeddings.position_ids" )
if not (all(key.startswith("entity_predictions" ) or key.startswith("lm_head" ) for key in unexpected_keys )):
raise ValueError(
"Unexpected keys"
f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}" )
# Check outputs
lowerCamelCase_ : List[str] = LukeTokenizer.from_pretrained(__UpperCAmelCase , task="entity_classification" )
lowerCamelCase_ : List[str] = (
"Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
" new world number one avoid a humiliating second- round exit at Wimbledon ."
)
lowerCamelCase_ : List[Any] = (39, 42)
lowerCamelCase_ : Dict = tokenizer(__UpperCAmelCase , entity_spans=[span] , add_prefix_space=__UpperCAmelCase , return_tensors="pt" )
lowerCamelCase_ : Optional[int] = model(**__UpperCAmelCase )
# Verify word hidden states
if model_size == "large":
lowerCamelCase_ : Union[str, Any] = torch.Size((1, 42, 1024) )
lowerCamelCase_ : List[Any] = torch.tensor(
[[0.0_1_3_3, 0.0_8_6_5, 0.0_0_9_5], [0.3_0_9_3, -0.2_5_7_6, -0.7_4_1_8], [-0.1_7_2_0, -0.2_1_1_7, -0.2_8_6_9]] )
else: # base
lowerCamelCase_ : List[str] = torch.Size((1, 42, 768) )
lowerCamelCase_ : Tuple = torch.tensor([[0.0_0_3_7, 0.1_3_6_8, -0.0_0_9_1], [0.1_0_9_9, 0.3_3_2_9, -0.1_0_9_5], [0.0_7_6_5, 0.5_3_3_5, 0.1_1_7_9]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __UpperCAmelCase , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
lowerCamelCase_ : Optional[Any] = torch.Size((1, 1, 1024) )
lowerCamelCase_ : List[str] = torch.tensor([[0.0_4_6_6, -0.0_1_0_6, -0.0_1_7_9]] )
else: # base
lowerCamelCase_ : int = torch.Size((1, 1, 768) )
lowerCamelCase_ : Any = torch.tensor([[0.1_4_5_7, 0.1_0_4_4, 0.0_1_7_4]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
f" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __UpperCAmelCase , atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(__UpperCAmelCase ) )
model.save_pretrained(__UpperCAmelCase )
def __a ( __UpperCAmelCase : Tuple ) -> str:
"""simple docstring"""
lowerCamelCase_ : Tuple = {}
with open(__UpperCAmelCase , "r" , encoding="utf-8" ) as f:
for index, line in enumerate(__UpperCAmelCase ):
lowerCamelCase_ , lowerCamelCase_ : int = line.rstrip().split("\t" )
lowerCamelCase_ : Union[str, Any] = index
return entity_vocab
if __name__ == "__main__":
snake_case_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
snake_case_ : int = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
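# A hedged sketch of using a converted checkpoint for the entity-classification
# task the script above verifies; "./luke-converted" is a hypothetical stand-in
# for whatever was passed as --pytorch_dump_folder_path.
#
#   from transformers import LukeModel, LukeTokenizer
#   tokenizer = LukeTokenizer.from_pretrained("./luke-converted", task="entity_classification")
#   model = LukeModel.from_pretrained("./luke-converted")
#   inputs = tokenizer("Ana Ivanovic plays tennis.", entity_spans=[(0, 12)], return_tensors="pt")
#   outputs = model(**inputs)
#   print(outputs.entity_last_hidden_state.shape)  # (1, 1, hidden_size)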
| 488
| 0
|
"""simple docstring"""
def is_automorphic_number(number: int) -> bool:
    '''
    Return True if ``number`` is automorphic, i.e. its square ends in the
    number itself.

    >>> is_automorphic_number(5)
    True
    >>> is_automorphic_number(76)
    True
    >>> is_automorphic_number(7)
    False
    '''
    if not isinstance(number, int):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
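    # Independent worked check of the property implemented above: a number is
    # automorphic when its square ends in the number itself, so below 100 the
    # automorphic numbers are 1, 5, 6, 25 and 76 (e.g. 76 ** 2 == 5776).
    print([n for n in range(1, 100) if str(n * n).endswith(str(n))])  # [1, 5, 6, 25, 76]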
| 716
|
"""simple docstring"""
a = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
a = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
a = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 505
| 0
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__a: Tuple = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
@dataclass
class UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE = field(
default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"} )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={"help": "The column name of the images in the files."} )
SCREAMING_SNAKE_CASE = field(default=a__ , metadata={"help": "A folder containing the training data."} )
SCREAMING_SNAKE_CASE = field(default=a__ , metadata={"help": "A folder containing the validation data."} )
SCREAMING_SNAKE_CASE = field(
default=0.15 , metadata={"help": "Percent to split off of train for validation."} )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def _lowerCAmelCase( self ) -> List[str]:
lowercase__ : Optional[int] = {}
if self.train_dir is not None:
lowercase__ : List[str] = self.train_dir
if self.validation_dir is not None:
lowercase__ : Optional[Any] = self.validation_dir
lowercase__ : Tuple = data_files if data_files else None
@dataclass
class UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={
"help": (
"The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
)
} , )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"} )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
} , )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
SCREAMING_SNAKE_CASE = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
SCREAMING_SNAKE_CASE = field(default=a__ , metadata={"help": "Name or path of preprocessor config."} )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
SCREAMING_SNAKE_CASE = field(
default=0.75 , metadata={"help": "The ratio of the number of masked tokens in the input sequence."} )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={"help": "Whether or not to train with normalized pixel values as target."} )
@dataclass
class UpperCAmelCase ( a__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = field(
default=1e-3 , metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."} )
def __UpperCamelCase ( UpperCAmelCase ):
lowercase__ : List[str] = torch.stack([example['''pixel_values'''] for example in examples] )
return {"pixel_values": pixel_values}
def __UpperCamelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase__ : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase__ , lowercase__ , lowercase__ : List[str] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_mae''' , UpperCAmelCase , UpperCAmelCase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase__ : Union[str, Any] = training_args.get_process_log_level()
logger.setLevel(UpperCAmelCase )
transformers.utils.logging.set_verbosity(UpperCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
lowercase__ : Dict = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase__ : Any = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset.
lowercase__ : Any = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
lowercase__ : int = None if '''validation''' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , UpperCAmelCase ) and data_args.train_val_split > 0.0:
lowercase__ : Any = ds['''train'''].train_test_split(data_args.train_val_split )
lowercase__ : str = split['''train''']
lowercase__ : List[str] = split['''test''']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase__ : Tuple = {
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name:
lowercase__ : str = ViTMAEConfig.from_pretrained(model_args.config_name , **UpperCAmelCase )
elif model_args.model_name_or_path:
lowercase__ : int = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase )
else:
lowercase__ : Tuple = ViTMAEConfig()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
# adapt config
config.update(
{
'''mask_ratio''': model_args.mask_ratio,
'''norm_pix_loss''': model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
lowercase__ : Optional[Any] = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **UpperCAmelCase )
elif model_args.model_name_or_path:
lowercase__ : Dict = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase )
else:
lowercase__ : List[Any] = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
lowercase__ : str = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('''Training new model from scratch''' )
lowercase__ : Any = ViTMAEForPreTraining(UpperCAmelCase )
if training_args.do_train:
lowercase__ : Tuple = ds['''train'''].column_names
else:
lowercase__ : Optional[Any] = ds['''validation'''].column_names
if data_args.image_column_name is not None:
lowercase__ : List[str] = data_args.image_column_name
elif "image" in column_names:
lowercase__ : Optional[int] = '''image'''
elif "img" in column_names:
lowercase__ : List[Any] = '''img'''
else:
lowercase__ : Tuple = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
lowercase__ : List[str] = image_processor.size['''shortest_edge''']
else:
lowercase__ : Optional[int] = (image_processor.size['''height'''], image_processor.size['''width'''])
lowercase__ : List[Any] = Compose(
[
Lambda(lambda UpperCAmelCase : img.convert('''RGB''' ) if img.mode != "RGB" else img ),
RandomResizedCrop(UpperCAmelCase , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(UpperCAmelCase ):
lowercase__ : Tuple = [transforms(UpperCAmelCase ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
lowercase__ : int = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(UpperCAmelCase )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
lowercase__ : Any = (
ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(UpperCAmelCase )
# Compute absolute learning rate
lowercase__ : int = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
lowercase__ : Tuple = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
lowercase__ : Union[str, Any] = Trainer(
model=UpperCAmelCase , args=UpperCAmelCase , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=UpperCAmelCase , data_collator=UpperCAmelCase , )
# Training
if training_args.do_train:
lowercase__ : Any = None
if training_args.resume_from_checkpoint is not None:
lowercase__ : List[str] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase__ : int = last_checkpoint
lowercase__ : List[Any] = trainer.train(resume_from_checkpoint=UpperCAmelCase )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowercase__ : Optional[int] = trainer.evaluate()
trainer.log_metrics('''eval''' , UpperCAmelCase )
trainer.save_metrics('''eval''' , UpperCAmelCase )
# Write model card and (optionally) push to hub
lowercase__ : Any = {
'''tasks''': '''masked-auto-encoding''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''masked-auto-encoding'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCAmelCase )
else:
trainer.create_model_card(**UpperCAmelCase )
def __UpperCamelCase ( UpperCAmelCase ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
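# A hedged example invocation of the training script above; the output directory
# and hyperparameter values are illustrative, not prescribed by the file:
#
#   python run_mae.py \
#     --dataset_name cifar10 \
#     --output_dir ./vit-mae-demo \
#     --do_train --do_eval \
#     --base_learning_rate 1.5e-4 \
#     --mask_ratio 0.75 \
#     --norm_pix_loss True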
| 152
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase ( a__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = KandinskyVaaControlnetImgaImgPipeline
SCREAMING_SNAKE_CASE = ["image_embeds", "negative_image_embeds", "image", "hint"]
SCREAMING_SNAKE_CASE = ["image_embeds", "negative_image_embeds", "image", "hint"]
SCREAMING_SNAKE_CASE = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
SCREAMING_SNAKE_CASE = False
@property
def _lowerCAmelCase( self ) -> Optional[int]:
return 32
@property
def _lowerCAmelCase( self ) -> Optional[Any]:
return 32
@property
def _lowerCAmelCase( self ) -> List[Any]:
return self.time_input_dim
@property
def _lowerCAmelCase( self ) -> int:
return self.time_input_dim * 4
@property
def _lowerCAmelCase( self ) -> List[str]:
return 100
@property
def _lowerCAmelCase( self ) -> List[str]:
torch.manual_seed(0 )
lowercase__ : str = {
'''in_channels''': 8,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
lowercase__ : Optional[int] = UNetaDConditionModel(**__lowerCAmelCase )
return model
@property
def _lowerCAmelCase( self ) -> str:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _lowerCAmelCase( self ) -> Any:
torch.manual_seed(0 )
lowercase__ : str = VQModel(**self.dummy_movq_kwargs )
return model
def _lowerCAmelCase( self ) -> Any:
lowercase__ : List[Any] = self.dummy_unet
lowercase__ : Optional[int] = self.dummy_movq
lowercase__ : List[str] = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_0_0_8_5,
'''beta_end''': 0.0_1_2,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
lowercase__ : Union[str, Any] = DDIMScheduler(**__lowerCAmelCase )
lowercase__ : List[Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase=0 ) -> Dict:
lowercase__ : List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
lowercase__ : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__lowerCAmelCase )
# create init_image
lowercase__ : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
lowercase__ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase__ : int = Image.fromarray(np.uinta(__lowerCAmelCase ) ).convert('''RGB''' ).resize((256, 256) )
# create hint
lowercase__ : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
if str(__lowerCAmelCase ).startswith('''mps''' ):
lowercase__ : Dict = torch.manual_seed(__lowerCAmelCase )
else:
lowercase__ : Dict = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
lowercase__ : Dict = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def _lowerCAmelCase( self ) -> List[str]:
lowercase__ : Optional[int] = '''cpu'''
lowercase__ : Dict = self.get_dummy_components()
lowercase__ : List[str] = self.pipeline_class(**__lowerCAmelCase )
lowercase__ : Any = pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase__ : int = pipe(**self.get_dummy_inputs(__lowerCAmelCase ) )
lowercase__ : List[Any] = output.images
lowercase__ : str = pipe(
**self.get_dummy_inputs(__lowerCAmelCase ) , return_dict=__lowerCAmelCase , )[0]
lowercase__ : List[Any] = image[0, -3:, -3:, -1]
lowercase__ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__ : int = np.array(
[0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase( self ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase( self ) -> Tuple:
lowercase__ : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' )
lowercase__ : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
lowercase__ : List[Any] = init_image.resize((512, 512) )
lowercase__ : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
lowercase__ : str = torch.from_numpy(np.array(__lowerCAmelCase ) ).float() / 2_5_5.0
lowercase__ : List[str] = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
lowercase__ : Union[str, Any] = '''A robot, 4k photo'''
lowercase__ : int = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(__lowerCAmelCase )
lowercase__ : Dict = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
lowercase__ : Optional[Any] = pipeline.to(__lowerCAmelCase )
pipeline.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase__ : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowercase__ , lowercase__ : Optional[Any] = pipe_prior(
__lowerCAmelCase , image=__lowerCAmelCase , strength=0.8_5 , generator=__lowerCAmelCase , negative_prompt='''''' , ).to_tuple()
lowercase__ : Tuple = pipeline(
image=__lowerCAmelCase , image_embeds=__lowerCAmelCase , negative_image_embeds=__lowerCAmelCase , hint=__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='''np''' , )
lowercase__ : Optional[int] = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
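# A hedged sketch of the two-stage flow the slow test above exercises, using the
# public diffusers class names (KandinskyV22PriorEmb2EmbPipeline and
# KandinskyV22ControlnetImg2ImgPipeline) instead of this file's aliases; the hub
# ids and call signature mirror the test but should be treated as assumptions.
#
#   prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
#       "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16)
#   decoder = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
#       "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16)
#   image_embeds, negative_embeds = prior(
#       "A robot, 4k photo", image=init_image, strength=0.85).to_tuple()
#   result = decoder(image=init_image, image_embeds=image_embeds,
#                    negative_image_embeds=negative_embeds, hint=depth_hint,
#                    strength=0.5, output_type="np").images[0]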
| 152
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Optional[Any] = logging.get_logger(__name__)
a : List[str] = {
'''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''',
'''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class _UpperCamelCase ( __UpperCamelCase ):
'''simple docstring'''
__lowercase : str = 'xlm-roberta-xl'
def __init__( self , __lowercase=250880 , __lowercase=2560 , __lowercase=36 , __lowercase=32 , __lowercase=10240 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=514 , __lowercase=1 , __lowercase=0.02 , __lowercase=1e-05 , __lowercase=1 , __lowercase=0 , __lowercase=2 , __lowercase="absolute" , __lowercase=True , __lowercase=None , **__lowercase , ):
super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = type_vocab_size
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = position_embedding_type
UpperCAmelCase__ = use_cache
UpperCAmelCase__ = classifier_dropout
class _UpperCamelCase ( __UpperCamelCase ):
'''simple docstring'''
@property
def A__ ( self ):
if self.task == "multiple-choice":
UpperCAmelCase__ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
UpperCAmelCase__ = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
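# A minimal sketch instantiating the config class above with its defaults; the
# values in the comments mirror the __init__ signature in this file, and the
# top-level import assumes the public transformers export name.
from transformers import XLMRobertaXLConfig

config = XLMRobertaXLConfig()
print(config.model_type)         # "xlm-roberta-xl"
print(config.hidden_size)        # 2560
print(config.num_hidden_layers)  # 36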
| 701
|
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) ->str:
UpperCAmelCase__ = OmegaConf.load(_SCREAMING_SNAKE_CASE )
if display:
print(yaml.dump(OmegaConf.to_container(_SCREAMING_SNAKE_CASE ) ) )
return config
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) ->List[Any]:
if conf_path is None:
UpperCAmelCase__ = """./model_checkpoints/vqgan_only.yaml"""
UpperCAmelCase__ = load_config(_SCREAMING_SNAKE_CASE , display=_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ = VQModel(**config.model.params )
if ckpt_path is None:
UpperCAmelCase__ = """./model_checkpoints/vqgan_only.pt"""
UpperCAmelCase__ = torch.load(_SCREAMING_SNAKE_CASE , map_location=_SCREAMING_SNAKE_CASE )
if ".ckpt" in ckpt_path:
UpperCAmelCase__ = sd["""state_dict"""]
model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
del sd
return model
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = model.encode(_SCREAMING_SNAKE_CASE )
print(F'''VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}''' )
UpperCAmelCase__ = model.decode(_SCREAMING_SNAKE_CASE )
return xrec
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) ->int:
UpperCAmelCase__ , UpperCAmelCase__ = string.rsplit(""".""" , 1 )
if reload:
UpperCAmelCase__ = importlib.import_module(_SCREAMING_SNAKE_CASE )
importlib.reload(_SCREAMING_SNAKE_CASE )
return getattr(importlib.import_module(_SCREAMING_SNAKE_CASE , package=_SCREAMING_SNAKE_CASE ) , cls )
def snake_case__ ( _SCREAMING_SNAKE_CASE ) ->str:
if "target" not in config:
raise KeyError("""Expected key `target` to instantiate.""" )
return get_obj_from_str(config["""target"""] )(**config.get("""params""" , {} ) )
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True ) ->str:
UpperCAmelCase__ = instantiate_from_config(_SCREAMING_SNAKE_CASE )
if sd is not None:
model.load_state_dict(_SCREAMING_SNAKE_CASE )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[str]:
# load the specified checkpoint
if ckpt:
UpperCAmelCase__ = torch.load(_SCREAMING_SNAKE_CASE , map_location="""cpu""" )
UpperCAmelCase__ = pl_sd["""global_step"""]
print(F'''loaded model from global step {global_step}.''' )
else:
UpperCAmelCase__ = {"""state_dict""": None}
UpperCAmelCase__ = None
UpperCAmelCase__ = load_model_from_config(config.model , pl_sd["""state_dict"""] , gpu=_SCREAMING_SNAKE_CASE , eval_mode=_SCREAMING_SNAKE_CASE )["""model"""]
return model, global_step
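# A hedged sketch of the encode/decode round trip the helpers above implement,
# written directly against the taming-transformers and OmegaConf APIs; the paths
# are the defaults hard-coded above and may not exist locally, so the sketch is
# left commented out.
#
#   import torch
#   from omegaconf import OmegaConf
#   from taming.models.vqgan import VQModel
#
#   cfg = OmegaConf.load("./model_checkpoints/vqgan_only.yaml")
#   vqgan = VQModel(**cfg.model.params)
#   sd = torch.load("./model_checkpoints/vqgan_only.pt", map_location="cpu")
#   vqgan.load_state_dict(sd, strict=False)
#   vqgan.eval()
#   z, _, _ = vqgan.encode(torch.randn(1, 3, 256, 256))  # quantized latents
#   xrec = vqgan.decode(z)                               # reconstruction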
| 422
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a : Any = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[Any] = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
a : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
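# A hedged, simplified sketch of the idea behind _LazyModule above: defer the
# real import until an attribute is first accessed, via a module-level
# __getattr__ (PEP 562). The mapping uses the stdlib json module as a toy
# stand-in for the torch-gated submodules in this file.
import importlib

_lazy_structure = {"json": ["dumps", "loads"]}  # hypothetical module -> symbols map

def __getattr__(name):
    for module_name, symbols in _lazy_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")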
| 273
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case: Optional[Any] = logging.get_logger(__name__)
class _UpperCAmelCase ( lowerCAmelCase__ ):
"""simple docstring"""
a_ = "timm_backbone"
def __init__( self , lowerCAmelCase_=None , lowerCAmelCase_=3 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=None , **lowerCAmelCase_ , ):
'''simple docstring'''
super().__init__(**lowerCAmelCase_ )
a_ : Optional[Any] = backbone
a_ : Union[str, Any] = num_channels
a_ : str = features_only
a_ : Any = use_pretrained_backbone
a_ : Tuple = True
a_ : Tuple = out_indices if out_indices is not None else (-1,)
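# A minimal sketch instantiating the config above; "resnet18" is an illustrative
# timm backbone name and the import assumes the public transformers export.
from transformers import TimmBackboneConfig

config = TimmBackboneConfig(backbone="resnet18", out_indices=(-2, -1))
print(config.model_type)   # "timm_backbone"
print(config.out_indices)  # (-2, -1)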
| 577
| 0
|
"""simple docstring"""
from __future__ import annotations
def is_9_pandigital(number: int) -> bool:
    '''simple docstring'''
    digits = str(number)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    '''simple docstring'''
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
print(F"""{solution() = }""")
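    # Worked check of the concatenated-product idea behind the search above:
    # 192 * (1, 2, 3) concatenates to "192384576" and 9 * (1, 2, 3, 4, 5) to
    # "918273645", both 1-9 pandigital; the two loops scan the 4-digit and
    # 3-digit base families from the top down so the first hit is the largest.
    assert "".join(str(192 * i) for i in (1, 2, 3)) == "192384576"
    assert "".join(str(9 * i) for i in (1, 2, 3, 4, 5)) == "918273645"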
| 713
|
"""simple docstring"""
import gc
import threading
import time

import psutil
import torch


class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Snapshot wall time plus CPU and per-GPU allocated memory.
    measures = {"time": time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()
    return measures


def end_measure(start_measures):
    # Deltas against the starting snapshot, reported in MiB.
    measures = {"time": time.time() - start_measures["time"]}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20
    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
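# A hedged usage sketch for the helpers above; the matmul stands in for a real
# workload, and GPU entries only appear when CUDA devices are available.
if __name__ == "__main__":
    start = start_measure()
    _ = torch.randn(1024, 1024) @ torch.randn(1024, 1024)
    measures = end_measure(start)
    log_measures(measures, "1024x1024 matmul")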
| 366
| 0
|
'''simple docstring'''
def binomial_coefficient(n: int, k: int) -> int:
    '''simple docstring'''
    result = 1  # to keep the calculated value
    # Since C(n, k) = C(n, n - k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    '''simple docstring'''
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    '''simple docstring'''
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    '''simple docstring'''
    return catalan_number(node_count) * factorial(node_count)


if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    print(
        f"Given {node_count} nodes, there are {binary_tree_count(node_count)} "
        f"binary trees and {catalan_number(node_count)} binary search trees."
    )
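    # Worked check of the formulas above: Catalan(n) = C(2n, n) // (n + 1) gives
    # 1, 1, 2, 5, 14, ..., the number of distinct binary search trees on n nodes,
    # and n! * Catalan(n) counts binary trees, so 3 nodes yield 5 BSTs and 30 trees.
    from math import comb, factorial as math_factorial

    for n in range(5):
        catalan = comb(2 * n, n) // (n + 1)
        print(n, catalan, math_factorial(n) * catalan)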
| 18
|
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
snake_case = get_tests_dir('''fixtures/dummy-config.json''')
class UpperCAmelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
_snake_case = 0
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec('''transformers.models.auto''' ) )
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
_snake_case = AutoConfig.from_pretrained('''bert-base-uncased''' )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_snake_case = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
_snake_case = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_snake_case = AutoConfig.for_model('''roberta''' )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
_snake_case = os.path.join(__lowerCamelCase , '''fake-roberta''' )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , '''config.json''' ) , '''w''' ) as f:
f.write(json.dumps({} ) )
_snake_case = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertEqual(type(__lowerCamelCase ) , __lowerCamelCase )
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
try:
AutoConfig.register('''custom''' , __lowerCamelCase )
# Wrong model type will raise an error
with self.assertRaises(__lowerCamelCase ):
AutoConfig.register('''model''' , __lowerCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCamelCase ):
AutoConfig.register('''bert''' , __lowerCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
_snake_case = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCamelCase )
_snake_case = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def __UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
with self.assertRaisesRegex(
__lowerCamelCase , '''bert-base is not a local folder and is not a valid model identifier''' ):
_snake_case = AutoConfig.from_pretrained('''bert-base''' )
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
with self.assertRaisesRegex(
__lowerCamelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
_snake_case = AutoConfig.from_pretrained(__lowerCamelCase , revision='''aaaaaa''' )
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
with self.assertRaisesRegex(
__lowerCamelCase , '''hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.''' , ):
_snake_case = AutoConfig.from_pretrained('''hf-internal-testing/no-config-test-repo''' )
def __UpperCAmelCase ( self : str ):
"""simple docstring"""
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__lowerCamelCase ):
_snake_case = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowerCamelCase ):
_snake_case = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=__lowerCamelCase )
_snake_case = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=__lowerCamelCase )
self.assertEqual(config.__class__.__name__ , '''NewModelConfig''' )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCamelCase )
_snake_case = AutoConfig.from_pretrained(__lowerCamelCase , trust_remote_code=__lowerCamelCase )
self.assertEqual(reloaded_config.__class__.__name__ , '''NewModelConfig''' )
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
A__ : List[Any] = '''new-model'''
try:
AutoConfig.register('''new-model''' , __lowerCamelCase )
# If remote code is not set, the default is to use local
_snake_case = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' )
self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''' )
# If remote code is disabled, we load the local one.
_snake_case = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=__lowerCamelCase )
self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''' )
# If remote is enabled, we load from the Hub
_snake_case = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=__lowerCamelCase )
self.assertEqual(config.__class__.__name__ , '''NewModelConfig''' )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
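# A minimal sketch of the register-and-resolve round trip the tests above cover;
# DemoConfig and its "custom-demo" model type are stand-ins defined here, not the
# CustomConfig fixture imported at the top of this file.
from transformers import AutoConfig, PretrainedConfig

class DemoConfig(PretrainedConfig):
    model_type = "custom-demo"

AutoConfig.register("custom-demo", DemoConfig)
assert isinstance(AutoConfig.for_model("custom-demo"), DemoConfig)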
| 103
| 0
|
"""simple docstring"""
def binary_and(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("""the value of both inputs must be positive""")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
if __name__ == "__main__":
import doctest
doctest.testmod()
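    # Worked examples for binary_and above: the result keeps the "0b" prefix and
    # is zero-padded to the longer operand, matching Python's built-in & operator.
    assert 37 & 50 == 0b100000   # binary_and(37, 50) returns "0b100000"
    assert 25 & 32 == 0          # binary_and(25, 32) returns "0b000000"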
| 702
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
A_ = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
snake_case_ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
snake_case_ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
snake_case_ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
snake_case_ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )
    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])
    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])
    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]
    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used as a text pair, but the tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
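# For orientation, a condensed sketch of the call patterns the tests above cover
# (model name taken from the tests; exact scores depend on the checkpoint):
#
#     classifier = pipeline("text-classification", model="hf-internal-testing/tiny-random-distilbert")
#     classifier("This is great !")                 # [{"label": ..., "score": ...}]
#     classifier("This is great !", top_k=None)     # one dict per label, sorted by score
#     classifier({"text": "A", "text_pair": "B"})   # single dict for a text pair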
| 498
| 0
|
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137
def lamberts_ellipsoidal_distance(lat_1: float, lon_1: float, lat_2: float, lon_2: float) -> float:
    # Equation parameter for the flattening of the ellipsoid
    flattening = (AXIS_A - AXIS_B) / AXIS_A

    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat_1 = atan((1 - flattening) * tan(radians(lat_1)))
    b_lat_2 = atan((1 - flattening) * tan(radians(lat_2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat_1, lon_1, lat_2, lon_2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat_1 + b_lat_2) / 2
    q_value = (b_lat_2 - b_lat_1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
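    # Illustrative spot check; the exact figure depends on the haversine helper,
    # so it is printed rather than asserted (coordinates: San Francisco, Yosemite;
    # the great-ellipse distance between them is roughly 254 km).
    SAN_FRANCISCO = (37.774856, -122.424227)
    YOSEMITE = (37.864742, -119.537521)
    print(f"{lamberts_ellipsoidal_distance(*SAN_FRANCISCO, *YOSEMITE):0,.0f} meters")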
| 199
|
'''simple docstring'''
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}
def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))
def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError("Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item
        # or the length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution()}')
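    # Worked example of the mapping being iterated (Project Euler 74):
    # 169 -> 363601 -> 1454 -> 169 is a cycle of length 3, and 145 is a fixed
    # point since 1! + 4! + 5! == 145. Hand-checked:
    assert digit_factorial_sum(145) == 145
    assert digit_factorial_sum(169) == 363601
    assert digit_factorial_sum(363601) == 1454
    assert digit_factorial_sum(1454) == 169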
| 199
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
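# The size_divisor assertions above amount to flooring each spatial dimension to
# a multiple of 32 before encoding; a pure-Python sketch of that rule (illustrative):
#
#     def floor_to_multiple(value: int, divisor: int = 32) -> int:
#         return (value // divisor) * divisor
#
#     floor_to_multiple(400)  # 384, so a 400-pixel side comes out as 384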
| 709
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
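# Layout produced by build_inputs_with_special_tokens / create_token_type_ids_from_sequences
# above, shown schematically (illustrative, not from the original file):
#
#     single sequence: [CLS] A [SEP]          -> token_type_ids: 0 0 ... 0
#     sequence pair:   [CLS] A [SEP] B [SEP]  -> token_type_ids: 0 ... 0 1 ... 1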
| 69
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
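# End-user sketch of the pipeline exercised above (checkpoint name taken from the
# slow test; step count illustrative):
#
#     ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#     image = ldm(num_inference_steps=50).images[0]  # default output_type is PIL
#     image.save("ldm_sample.png")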
| 18
|
'''simple docstring'''
def different_signs(num_1: int, num_2: int) -> bool:
    """Return True if the two integers have different signs."""
    return num_1 ^ num_2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
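    # Why the XOR trick works: in two's complement the sign lives in the top bit,
    # so num_1 ^ num_2 is negative exactly when the sign bits differ. Hand-checked:
    assert different_signs(1, -1) is True    # 1 ^ -1 == -2
    assert different_signs(1, 1) is False    # 1 ^ 1 == 0
    assert different_signs(-2, -3) is False  # -2 ^ -3 == 3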
| 18
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )
    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict
    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
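# End-to-end flow exercised by the integration test above, condensed for reference
# (checkpoint name and threshold taken from the test itself):
#
#     processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
#     model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")
#     inputs = processor(images=image, return_tensors="pt")
#     outputs = model(**inputs)
#     results = processor.post_process_object_detection(
#         outputs, threshold=0.3, target_sizes=[image.size[::-1]]
#     )[0]  # dict with "scores", "labels" and "boxes" in (xmin, ymin, xmax, ymax)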
| 715
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
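# Minimal instantiation sketch (override values are illustrative; defaults are those above):
#
#     config = VisualBertConfig()                          # config.visual_embedding_dim == 512
#     wide = VisualBertConfig(visual_embedding_dim=1024)   # wider visual features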
| 247
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )

    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []

    # stem
    # fmt: off
rename_keys.append(("""backbone.0.body.patch_embed.proj.weight""", """model.backbone.model.embeddings.patch_embeddings.projection.weight"""))
rename_keys.append(("""backbone.0.body.patch_embed.proj.bias""", """model.backbone.model.embeddings.patch_embeddings.projection.bias"""))
rename_keys.append(("""backbone.0.body.patch_embed.norm.weight""", """model.backbone.model.embeddings.norm.weight"""))
rename_keys.append(("""backbone.0.body.patch_embed.norm.bias""", """model.backbone.model.embeddings.norm.bias"""))
# stages
for i in range(len(config.backbone_config.depths)):
for j in range(config.backbone_config.depths[i]):
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight'))
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias'))
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table'))
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index'))
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight'))
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias'))
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight'))
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias'))
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight'))
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias'))
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight'))
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias'))
if i < 3:
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.reduction.weight', F'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight'))
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.norm.weight', F'model.backbone.model.encoder.layers.{i}.downsample.norm.weight'))
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.norm.bias', F'model.backbone.model.encoder.layers.{i}.downsample.norm.bias'))
rename_keys.append(("""backbone.0.body.norm1.weight""", """model.backbone.model.hidden_states_norms.stage2.weight"""))
rename_keys.append(("""backbone.0.body.norm1.bias""", """model.backbone.model.hidden_states_norms.stage2.bias"""))
rename_keys.append(("""backbone.0.body.norm2.weight""", """model.backbone.model.hidden_states_norms.stage3.weight"""))
rename_keys.append(("""backbone.0.body.norm2.bias""", """model.backbone.model.hidden_states_norms.stage3.bias"""))
rename_keys.append(("""backbone.0.body.norm3.weight""", """model.backbone.model.hidden_states_norms.stage4.weight"""))
rename_keys.append(("""backbone.0.body.norm3.bias""", """model.backbone.model.hidden_states_norms.stage4.bias"""))
# transformer encoder
for i in range(config.encoder_layers):
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', F'model.encoder.layers.{i}.self_attn.sampling_offsets.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', F'model.encoder.layers.{i}.self_attn.sampling_offsets.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', F'model.encoder.layers.{i}.self_attn.attention_weights.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', F'model.encoder.layers.{i}.self_attn.attention_weights.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.value_proj.weight', F'model.encoder.layers.{i}.self_attn.value_proj.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.value_proj.bias', F'model.encoder.layers.{i}.self_attn.value_proj.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.output_proj.weight', F'model.encoder.layers.{i}.self_attn.output_proj.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.output_proj.bias', F'model.encoder.layers.{i}.self_attn.output_proj.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.weight', F'model.encoder.layers.{i}.self_attn_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'model.encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'model.encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'model.encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'model.encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'model.encoder.layers.{i}.fc2.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'model.encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'model.encoder.layers.{i}.final_layer_norm.bias'))
# transformer decoder
for i in range(config.decoder_layers):
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', F'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', F'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', F'model.decoder.layers.{i}.encoder_attn.attention_weights.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', F'model.decoder.layers.{i}.encoder_attn.attention_weights.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', F'model.decoder.layers.{i}.encoder_attn.value_proj.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', F'model.decoder.layers.{i}.encoder_attn.value_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', F'model.decoder.layers.{i}.encoder_attn.output_proj.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', F'model.decoder.layers.{i}.encoder_attn.output_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.weight', F'model.decoder.layers.{i}.encoder_attn_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'model.decoder.layers.{i}.encoder_attn_layer_norm.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'model.decoder.layers.{i}.self_attn.out_proj.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'model.decoder.layers.{i}.self_attn.out_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm2.weight', F'model.decoder.layers.{i}.self_attn_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm2.bias', F'model.decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'model.decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'model.decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'model.decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'model.decoder.layers.{i}.fc2.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'model.decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'model.decoder.layers.{i}.final_layer_norm.bias'))
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
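# Both q/k/v readers above rely on the fused in_proj layout used by
# torch.nn.MultiheadAttention: a (3*d, d) weight whose rows [0, d) hold the query
# projection, [d, 2d) the key projection and [2d, 3d) the value projection, with
# the bias stacked the same way; the slicing simply unstacks those thirds.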
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])

    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
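# Typical invocation (the script filename below is illustrative; the flags are
# exactly the ones defined by the parser above):
#
#     python convert_deta_swin_to_hf.py \
#         --model_name deta-swin-large \
#         --pytorch_dump_folder_path ./deta-swin-large \
#         --push_to_hub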
| 648
|
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
| 648
| 1
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImg2ImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 714
|
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label

    return config
def create_rename_keys(config):
    rename_keys = []
    # stem
    # fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.layers.{i}.downsample.reduction.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', F'''mask_embedder.{i}.0.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', F'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
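# Each entry above is a (source_key, target_key) pair; rename_key below pops the
# source entry out of the original state dict and re-inserts it under the
# Hugging Face target name.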
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_swin_q_k_v( state_dict , backbone_config ):
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F'''backbone.layers.{i}.blocks.{j}.attn.qkv.weight''' )
            in_proj_bias = state_dict.pop(F'''backbone.layers.{i}.blocks.{j}.attn.qkv.bias''' )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'''] = in_proj_weight[:dim, :]
            state_dict[F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'''] = in_proj_bias[:dim]
            state_dict[F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'''] = in_proj_weight[dim : dim * 2, :]
            state_dict[F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'''] = in_proj_bias[dim : dim * 2]
            state_dict[F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'''] = in_proj_weight[-dim :, :]
            state_dict[F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'''] = in_proj_bias[-dim :]
            # fmt: on
def read_in_decoder_q_k_v( state_dict , config ):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers ):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight'''] = in_proj_weight[: hidden_size, :]
        state_dict[F'''model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias'''] = in_proj_bias[: hidden_size]
        state_dict[F'''model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight'''] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F'''model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias'''] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F'''model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight'''] = in_proj_weight[-hidden_size :, :]
        state_dict[F'''model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias'''] = in_proj_bias[-hidden_size :]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight'''] = in_proj_weight[: hidden_size, :]
        state_dict[F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias'''] = in_proj_bias[: hidden_size]
        state_dict[F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight'''] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias'''] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight'''] = in_proj_weight[-hidden_size :, :]
        state_dict[F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias'''] = in_proj_bias[-hidden_size :]
    # fmt: on
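# Illustrative sketch (not part of the conversion; all names are hypothetical):
# a fused qkv projection of shape (3*dim, dim) splits into the query/key/value
# blocks sliced above.
#
#   import torch
#   dim = 4
#   fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
#   q, k, v = fused[:dim], fused[dim : dim * 2], fused[-dim:]
#   assert torch.equal(torch.cat([q, k, v]), fused)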
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
return im
@torch.no_grad()
def convert_maskformer_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub = False ):
    config = get_maskformer_config(model_name )
# load original state_dict
    with open(checkpoint_path , '''rb''' ) as f:
        data = pickle.load(f )
    state_dict = data['''model''']
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_swin_q_k_v(state_dict , config.backbone_config )
    read_in_decoder_q_k_v(state_dict , config )
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value )
# load 🤗 model
    model = MaskFormerForInstanceSegmentation(config )
model.eval()
for name, param in model.named_parameters():
        print(name , param.shape )
    missing_keys, unexpected_keys = model.load_state_dict(state_dict , strict=False )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
    assert len(unexpected_keys ) == 0, F'''Unexpected keys: {unexpected_keys}'''
# verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if '''ade''' in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index , reduce_labels=reduce_labels )
    inputs = image_processor(image , return_tensors='''pt''' )
    outputs = model(**inputs )
print('''Logits:''' , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_logits , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print('''Pushing model and image processor to the hub...''' )
model.push_to_hub(F'''nielsr/{model_name}''' )
image_processor.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        '--model_name',
        default='maskformer-swin-tiny-ade',
        type=str,
        help='Name of the MaskFormer model you\'d like to convert',
    )
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
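# Example invocation (the script filename and paths are illustrative):
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade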
| 260
| 0
|
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    '''simple docstring'''
    def __init__( self , parent , batch_size=14 , seq_length=7 , is_training=True , use_token_type_ids=True , use_input_mask=True , use_labels=True , use_mc_token_ids=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
    def get_config( self ):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
    def create_and_check_ctrl_model( self , config , input_ids , input_mask , head_mask , token_type_ids , *args ):
        model = CTRLModel(config=config )
        model.to(torch_device )
        model.eval()
        model(input_ids , token_type_ids=token_type_ids , head_mask=head_mask )
        model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
    def create_and_check_lm_head_model( self , config , input_ids , input_mask , head_mask , token_type_ids , *args ):
        model = CTRLLMHeadModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
return config, inputs_dict
    def create_and_check_ctrl_for_sequence_classification( self , config , input_ids , head_mask , token_type_ids , *args ):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        result = model(input_ids , token_type_ids=token_type_ids , labels=sequence_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class lowerCamelCase_ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
lowercase_ = True
lowercase_ = False
lowercase_ = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
    def setUp( self ):
        self.model_tester = CTRLModelTester(self )
        self.config_tester = ConfigTester(self , config_class=CTRLConfig , n_embd=37 )
    def tearDown( self ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
    def test_config( self ):
self.config_tester.run_common_tests()
    def test_ctrl_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs )
    def test_ctrl_lm_head_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCAmelCase_ ( self : int ):
pass
    @slow
    def test_model_from_pretrained( self ):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def lowerCAmelCase_ ( self : int ):
pass
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
    @slow
    def test_lm_generate_ctrl( self ):
        model = CTRLLMHeadModel.from_pretrained('ctrl' )
        model.to(torch_device )
        input_ids = torch.tensor(
            [[11_859, 0, 1_611, 8]] , dtype=torch.long , device=torch_device )  # Legal the president is
        expected_output_ids = [
11_859,
0,
1_611,
8,
5,
150,
26_449,
2,
19,
348,
469,
3,
2_595,
48,
20_740,
246_533,
246_533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
| 31
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__a : List[Any] = logging.get_logger(__name__)
# TODO: upload to AWS
__a : Union[str, Any] = {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
),
}
class __lowercase ( PretrainedConfig ):
    '''simple docstring'''
    model_type = "retribert"
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=8 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , share_encoders=True , projection_dim=128 , pad_token_id=0 , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
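# Minimal usage sketch (upstream this class is RetriBertConfig; shown for
# illustration only):
#   config = RetriBertConfig(projection_dim=256)
#   config.save_pretrained("./retribert-config")  # writes config.json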
| 637
| 0
|
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
_lowerCamelCase = logging.get_logger(__name__)
class lowerCamelCase_ ( BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ["pixel_values"]
    def __init__( self , do_rescale = True , rescale_factor = 1 / 255 , do_pad = True , pad_size = 8 , **kwargs , ):
        super().__init__(**kwargs )
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size
    def rescale( self , image , scale , data_format = None , **kwargs ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def pad( self , image , size , data_format = None ):
        old_height, old_width = get_image_size(image )
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=data_format )
    def preprocess( self , images , do_rescale = None , rescale_factor = None , do_pad = None , pad_size = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_pad:
            images = [self.pad(image , size=pad_size ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
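# Quick numeric check of the pad-to-multiple rule above (illustrative): with
# size=8 and old_height=57, pad_height = (57 // 8 + 1) * 8 - 57 = 7, so the
# padded height is 64. Note an input already at a multiple of 8 gains one full
# extra block of 8 rows.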
| 112
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=False )
    subparsers = parser.add_subparsers(help="accelerate command helpers" )
    # Register commands
    get_config_parser(subparsers=subparsers )
    env_command_parser(subparsers=subparsers )
    launch_command_parser(subparsers=subparsers )
    tpu_command_parser(subparsers=subparsers )
    test_command_parser(subparsers=subparsers )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , "func" ):
        parser.print_help()
        exit(1 )
    # Run
    args.func(args )
if __name__ == "__main__":
main()
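# Typical entry points exposed by the installed `accelerate` package
# ('train.py' below is a placeholder for your own script):
#   accelerate config            # interactive configuration
#   accelerate launch train.py   # run a script under the saved config
#   accelerate env               # print environment info for bug reports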
| 112
| 1
|
'''simple docstring'''
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples( tok , src_examples , tgt_examples , max_tokens=10_24 ):
    '''simple docstring'''
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples , tgt_examples ) )
    new_src, new_tgt = sorted_examples[0]
    def is_too_big(strang ):
        return tok(strang , return_tensors="pt" ).input_ids.shape[1] > max_tokens
    for src, tgt in tqdm(sorted_examples[1:] ):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src ) or is_too_big(cand_tgt ):  # cant fit, finalize example
            finished_src.append(new_src )
            finished_tgt.append(new_tgt )
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src )
        finished_tgt.append(new_tgt )
    return finished_src, finished_tgt
def pack_data_dir( tok , data_dir: Path , max_tokens , save_path ):
    '''simple docstring'''
    save_path = Path(save_path )
    save_path.mkdir(exist_ok=True )
    for split in ["train"]:
        src_path, tgt_path = data_dir / F"""{split}.source""", data_dir / F"""{split}.target"""
        src_docs = [x.rstrip() for x in Path(src_path ).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path ).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok , src_docs , tgt_docs , max_tokens )
        print(F"""packed {split} split from {len(src_docs )} examples -> {len(packed_src )}.""" )
        Path(save_path / F"""{split}.source""" ).open("w" ).write("\n".join(packed_src ) )
        Path(save_path / F"""{split}.target""" ).open("w" ).write("\n".join(packed_tgt ) )
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / F"""{split}.source""", data_dir / F"""{split}.target"""
        shutil.copyfile(src_path , save_path / F"""{split}.source""" )
        shutil.copyfile(tgt_path , save_path / F"""{split}.target""" )
def packer_cli():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name" , type=str , help="like facebook/bart-large-cnn,t5-base, etc." )
    parser.add_argument("--max_seq_len" , type=int , default=1_28 )
    parser.add_argument("--data_dir" , type=str )
    parser.add_argument("--save_path" , type=str )
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name )
    return pack_data_dir(tokenizer , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
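# Example invocation (the script filename is illustrative):
#   python pack_dataset.py --tok_name facebook/bart-large-cnn \
#       --max_seq_len 128 --data_dir ./cnn_dm --save_path ./cnn_dm_packed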
| 48
|
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1
def pressure_of_gas_system(moles: float , kelvin: float , volume: float ) -> float:
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError("""Invalid inputs. Enter positive value.""" )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def volume_of_gas_system(moles: float , kelvin: float , pressure: float ) -> float:
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError("""Invalid inputs. Enter positive value.""" )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
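# Worked example of the ideal gas law PV = nRT (illustrative):
#   pressure_of_gas_system(2, 100, 5) == 2 * 100 * 8.314462 / 5 == 332.57848  # Pa
#   volume_of_gas_system(2, 100, 5)   == 2 * 100 * 8.314462 / 5 == 332.57848  # m^3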
if __name__ == "__main__":
from doctest import testmod
testmod()
| 384
| 0
|
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 702
|
import numpy as np
import qiskit
def bbaa( key_len = 8 , seed = None ) -> str:
    rng = np.random.default_rng(seed=seed )
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2 , size=num_qubits )
    # The set of states Alice will prepare.
    alice_state = rng.integers(2 , size=num_qubits )
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2 , size=num_qubits )
    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits , name='BB84' )
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis ):
        if alice_state[index] == 1:
            bbaa_circ.x(index )
        if alice_basis[index] == 1:
            bbaa_circ.h(index )
    bbaa_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis ):
        if bob_basis[index] == 1:
            bbaa_circ.h(index )
    bbaa_circ.barrier()
    bbaa_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend('aer_simulator' )
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ , sim , shots=1 , seed_simulator=seed )
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = ''.join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis , bob_basis , result )
            if alice_basis_bit == bob_basis_bit
        ] )
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key ) >= key_len else gen_key.ljust(key_len , '0' )
    return key
if __name__ == "__main__":
print(F'''The generated key is : {bbaa(8, seed=0)}''')
from doctest import testmod
testmod()
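# Note on determinism (illustrative): both the basis draws and the simulator are
# seeded, so repeated calls such as bbaa(8, seed=0) return the same 8-bit key.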
| 699
| 0
|
"""simple docstring"""
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort( sequence: list ):
    """simple docstring"""
    if not sequence:
        return []
    if len(sequence ) == 1:
        return list(sequence )
    low = 0
    high = len(sequence ) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f'''The elements inside the sequence must contain only {colors} values'''
            raise ValueError(msg )
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
print(f"""{dutch_national_flag_sort(unsorted)}""")
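# Illustrative trace: dutch_national_flag_sort([2, 0, 1, 2, 0]) -> [0, 0, 1, 2, 2].
# The single pass keeps 0s before `low`, 2s after `high`, and 1s in between.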
| 616
|
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
class VisionEncoderDecoderConfig( PretrainedConfig ):
    model_type = """vision-encoder-decoder"""
    is_composition = True
    def __init__( self , **kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs )
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                F'''A configuration of type {self.model_type} cannot be instantiated because '''
                F'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' )
        encoder_config = kwargs.pop("""encoder""" )
        encoder_model_type = encoder_config.pop("""model_type""" )
        decoder_config = kwargs.pop("""decoder""" )
        decoder_model_type = decoder_config.pop("""model_type""" )
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True
@classmethod
    def from_encoder_decoder_configs( cls , encoder_config: PretrainedConfig , decoder_config: PretrainedConfig , **kwargs ):
        '''simple docstring'''
        logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )
    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["""encoder"""] = self.encoder.to_dict()
        output["""decoder"""] = self.decoder.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("""1.11""" )
@property
    def inputs( self ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
    def atol_for_validation( self ):
'''simple docstring'''
return 1e-4
@property
    def outputs( self ):
'''simple docstring'''
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class VisionEncoderDecoderDecoderOnnxConfig( OnnxConfig ):
@property
    def inputs( self ):
'''simple docstring'''
        common_inputs = OrderedDict()
        common_inputs["""input_ids"""] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        common_inputs["""encoder_hidden_states"""] = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
    def generate_dummy_inputs( self , tokenizer: "PreTrainedTokenizerBase" , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional["TensorType"] = None , ):
        '''simple docstring'''
        import torch
        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        batch, encoder_sequence = dummy_input["""input_ids"""].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["""input_ids"""] = dummy_input.pop("""input_ids""" )
        common_inputs["""attention_mask"""] = dummy_input.pop("""attention_mask""" )
        common_inputs["""encoder_hidden_states"""] = torch.zeros(encoder_hidden_states_shape )
        return common_inputs
class VisionEncoderDecoderOnnxConfig( OnnxConfig ):
@property
    def inputs( self ):
'''simple docstring'''
pass
    def get_encoder_config( self , encoder_config: PretrainedConfig ):
        '''simple docstring'''
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config )
    def get_decoder_config( self , encoder_config: PretrainedConfig , decoder_config: PretrainedConfig , feature: str = "default" ):
        '''simple docstring'''
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config , feature )
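# Composition sketch (the encoder/decoder choices are illustrative):
#   from transformers import ViTConfig, BertConfig
#   cfg = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig() , BertConfig() )
#   assert cfg.decoder.is_decoder and cfg.decoder.add_cross_attention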
| 616
| 1
|
"""simple docstring"""
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_UpperCamelCase : int = logging.get_logger(__name__) # pylint: disable=invalid-name
def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor] ):
    '''simple docstring'''
    warnings.warn(
        'The preprocess method is deprecated and will be removed in a future version. Please'
        ' use VaeImageProcessor.preprocess instead' , FutureWarning , )
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    if isinstance(image[0] , PIL.Image.Image ):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
        image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
        image = np.concatenate(image , axis=0 )
        image = np.array(image ).astype(np.float32 ) / 255.0
        image = image.transpose(0 , 3 , 1 , 2 )
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image )
    elif isinstance(image[0] , torch.Tensor ):
        image = torch.cat(image , dim=0 )
    return image
def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor] ):
    '''simple docstring'''
    if isinstance(mask , torch.Tensor ):
        return mask
    elif isinstance(mask , PIL.Image.Image ):
        mask = [mask]
    if isinstance(mask[0] , PIL.Image.Image ):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert('L' ).resize((w, h) , resample=PIL_INTERPOLATION['nearest'] ) )[None, :] for m in mask]
        mask = np.concatenate(mask , axis=0 )
        mask = mask.astype(np.float32 ) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask )
    elif isinstance(mask[0] , torch.Tensor ):
        mask = torch.cat(mask , dim=0 )
    return mask
class snake_case ( DiffusionPipeline ):
    unet: UNetaDModel
    scheduler: RePaintScheduler
def __init__( self : Tuple , A : List[str] , A : Tuple ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=A , scheduler=A )
@torch.no_grad()
def __call__( self : Optional[int] , A : Union[torch.Tensor, PIL.Image.Image] , A : Union[torch.Tensor, PIL.Image.Image] , A : int = 2_5_0 , A : float = 0.0 , A : int = 1_0 , A : int = 1_0 , A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , A : Optional[str] = "pil" , A : bool = True , ):
'''simple docstring'''
a : List[Any] = image
a : int = _preprocess_image(A )
a : str = original_image.to(device=self.device , dtype=self.unet.dtype )
a : Dict = _preprocess_mask(A )
a : Union[str, Any] = mask_image.to(device=self.device , dtype=self.unet.dtype )
a : Any = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(A , A ) and len(A ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(A )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
a : Optional[int] = original_image.shape
a : List[Any] = randn_tensor(A , generator=A , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(A , A , A , self.device )
a : Union[str, Any] = eta
a : int = self.scheduler.timesteps[0] + 1
a : Optional[int] = generator[0] if isinstance(A , A ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
a : List[Any] = self.unet(A , A ).sample
# compute previous image: x_t -> x_t-1
a : List[Any] = self.scheduler.step(A , A , A , A , A , A ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
a : str = self.scheduler.undo_step(A , A , A )
a : List[str] = t
a : Any = (image / 2 + 0.5).clamp(0 , 1 )
a : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
a : int = self.numpy_to_pil(A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A )
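# Usage sketch (model handles are illustrative; upstream this pipeline is
# RePaintPipeline, paired with a DDPM-style UNet and a RePaintScheduler):
#   pipe = RePaintPipeline(unet=unet , scheduler=RePaintScheduler() )
#   out = pipe(image=init_image , mask_image=mask , num_inference_steps=250 ).images[0]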
| 118
|
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def snake_case (A_ :Path , A_ :list ):
'''simple docstring'''
a : Optional[int] = '\n'.join(A_ )
Path(A_ ).open('w' ).writelines(A_ )
T5_TINY = 'patrickvonplaten/t5-tiny-random'
BART_TINY = 'sshleifer/bart-tiny-random'
MBART_TINY = 'sshleifer/tiny-mbart'
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class snake_case ( TestCasePlus ):
    def run_eval_tester( self , model ):
        '''simple docstring'''
        input_file_name = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
        output_file_name = input_file_name.parent / 'utest_output.txt'
        assert not output_file_name.exists()
        articles = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
        _dump_articles(input_file_name , articles )
        score_path = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' )
        task = 'translation_en_to_de' if model == T5_TINY else 'summarization'
        testargs = F'''
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        '''.split()
        with patch.object(sys , 'argv' , testargs ):
            run_generate()
            assert Path(output_file_name ).exists()
            # os.remove(Path(output_file_name))
    def test_run_eval( self ):
        '''simple docstring'''
        self.run_eval_tester(T5_TINY )
    @parameterized.expand([BART_TINY, MBART_TINY] )
    @slow
    def test_run_eval_slow( self , model ):
        '''simple docstring'''
        self.run_eval_tester(model )
    @parameterized.expand([T5_TINY, MBART_TINY] )
    @slow
    def test_run_eval_search( self , model ):
        '''simple docstring'''
        input_file_name = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
        output_file_name = input_file_name.parent / 'utest_output.txt'
        assert not output_file_name.exists()
        text = {
            'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
            'de': [
                'Maschinelles Lernen ist großartig, oder?',
                'Ich esse gerne Bananen',
                'Morgen ist wieder ein toller Tag!',
            ],
        }
        tmp_dir = Path(self.get_auto_remove_tmp_dir() )
        score_path = str(tmp_dir / 'scores.json' )
        reference_path = str(tmp_dir / 'val.target' )
        _dump_articles(input_file_name , text['en'] )
        _dump_articles(reference_path , text['de'] )
        task = 'translation_en_to_de' if model == T5_TINY else 'summarization'
        testargs = F'''
            run_eval_search.py
            {model}
            {str(input_file_name )}
            {str(output_file_name )}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        '''.split()
        testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] )
        with patch.object(sys , 'argv' , testargs ):
            with CaptureStdout() as cs:
                run_search()
        expected_strings = [' num_beams | length_penalty', model, 'Best score args']
        un_expected_strings = ['Info']
        if "translation" in task:
            expected_strings.append('bleu' )
        else:
            expected_strings.extend(ROUGE_KEYS )
        for w in expected_strings:
            assert w in cs.out
        for w in un_expected_strings:
            assert w not in cs.out
        assert Path(score_path ).exists()
        os.remove(Path(score_path ) )
| 118
| 1
|
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__( self , parent , batch_size=13 , image_size=10 , num_channels=3 , patch_size=2 , num_frames=2 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , num_labels=10 , initializer_range=0.02 , attention_type="divided_space_time" , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels
        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        """simple docstring"""
        config = TimesformerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
        config.num_labels = self.num_labels
return config
    def create_and_check_model( self , config , pixel_values , labels ):
        """simple docstring"""
        model = TimesformerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_video_classification( self , config , pixel_values , labels ):
        """simple docstring"""
        model = TimesformerForVideoClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels) )
        self.parent.assertEqual(result.logits.shape , expected_shape )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
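# Token-count sanity check for the tester above (illustrative): with
# image_size=10, patch_size=2 and num_frames=2, each frame has (10 // 2) ** 2 = 25
# patches, so seq_length = 2 * 25 + 1 = 51 including the CLS token.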
@require_torch
class A ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TimesformerModel, 'video-classification': TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TimesformerModelTester(self )
        self.config_tester = ConfigTester(
            self , config_class=TimesformerConfig , has_text_modality=False , hidden_size=37 )
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        """simple docstring"""
        inputs_dict = copy.deepcopy(inputs_dict )
        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING ):
                inputs_dict["""labels"""] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
def lowerCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='TimeSformer does not use inputs_embeds' )
def lowerCamelCase ( self : Dict ) -> str:
"""simple docstring"""
pass
def lowerCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
_lowerCamelCase : Any =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : List[Any] =model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCamelCase : int =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def lowerCamelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : str =model_class(__a )
_lowerCamelCase : Any =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Optional[Any] =[*signature.parameters.keys()]
_lowerCamelCase : Optional[Any] =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_video_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
# We will verify our results on a video of eating spaghetti
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)


@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
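
# Hedged side note on the shape assertion in test_attention_outputs above:
# TimeSformer folds time into the batch axis, so each attention map covers a
# single frame plus its CLS token. A pure-Python sketch with illustrative
# sizes (the 196-patch / 12-head numbers are assumptions, not from the tests):
batch_size, num_frames, num_patches_per_frame, num_heads = 1, 2, 196, 12
seq_len = num_frames * num_patches_per_frame  # plays the role of model_tester.seq_length
attention_shape = (
    batch_size * num_frames,
    num_heads,
    seq_len // num_frames + 1,  # patches per frame + CLS token
    seq_len // num_frames + 1,
)
assert attention_shape == (2, 12, 197, 197)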
| 464
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel


def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=False,
            **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))


@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
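
# Hedged illustration of why test_batch_generation sets padding_side="left":
# a decoder-only model continues from the last position of each row, so pad
# tokens must go on the left or generation would start from padding. The
# "gpt2" checkpoint is an assumption chosen only because it is a small download.
if __name__ == "__main__":
    tok = GPT2Tokenizer.from_pretrained("gpt2")
    tok.pad_token = tok.eos_token
    tok.padding_side = "left"
    batch = tok(["Hello, my dog is a little", "Today, I"], padding=True)
    print(batch["input_ids"])  # the shorter prompt is padded on the left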
| 306
| 0
|
"""Numerical integration with the extended trapezoidal rule."""


def method_1(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
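
# Quick, hedged sanity check of method_1 above: the exact integral of
# f(x) = x**2 on [0, 1] is 1/3. The tolerance is deliberately loose because
# make_points' floating-point stopping rule can drop the last interior point.
assert abs(method_1([0.0, 1.0], 10.0) - 1.0 / 3.0) < 0.1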
| 211
|
from sklearn.metrics import f1_score

import datasets


_DESCRIPTION = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
    >>> f1_metric = datasets.load_metric("f1")
    >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
    >>> print(results)
    {'f1': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
    >>> f1_metric = datasets.load_metric("f1")
    >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
    >>> print(round(results['f1'], 2))
    0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
    >>> f1_metric = datasets.load_metric("f1")
    >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
    >>> print(round(results['f1'], 2))
    0.35
Example 4-A multiclass example, with different values for the `average` input.
    >>> predictions = [0, 2, 1, 0, 0, 1]
    >>> references = [0, 1, 2, 0, 1, 2]
    >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
    >>> print(round(results['f1'], 2))
    0.27
    >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
    >>> print(round(results['f1'], 2))
    0.33
    >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
    >>> print(round(results['f1'], 2))
    0.27
    >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
    >>> print(results)
{'f1': array([0.8, 0. , 0. ])}
"""
_CITATION = """
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class F1(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 211
| 1
|
# Lint as: python3
import itertools
import os
import re


_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
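
# Hedged usage sketch of the naming helpers above; the dataset name, base
# path and shard lengths are made up for illustration.
if __name__ == "__main__":
    print(camelcase_to_snakecase("SquadV2"))                  # squad_v2
    print(filename_prefix_for_split("openwebtext", "train"))  # openwebtext-train
    print(
        filenames_for_dataset_split(
            "/data", "openwebtext", "train", filetype_suffix="arrow", shard_lengths=[100, 100]
        )
    )  # ['/data/openwebtext-train-00000-of-00002.arrow', '/data/openwebtext-train-00001-of-00002.arrow']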
| 340
|
def check_cycle(graph: dict) -> bool:
    """Return True if the given directed graph contains a cycle."""
    visited: set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set[int] = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False


if __name__ == "__main__":
    from doctest import testmod

    testmod()
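
# Hedged usage sketch for check_cycle above; both adjacency dicts are
# illustrative.
if __name__ == "__main__":
    assert check_cycle({0: [1, 2], 1: [2], 2: []}) is False  # DAG: no cycle
    assert check_cycle({0: [1], 1: [2], 2: [0]}) is True     # 0 -> 1 -> 2 -> 0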
| 297
| 0
|
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
__lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class _lowerCAmelCase ( lowerCAmelCase__ ):
    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # We use the input text to generate the segmentation mask
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
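
# Hedged usage sketch for the TextInpainting pipeline above, following the
# usual diffusers custom-pipeline pattern. The checkpoint ids, image path and
# prompts are assumptions for illustration only.
if __name__ == "__main__":
    from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

    processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
    segmentation_model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

    pipe = DiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-inpainting",
        custom_pipeline="text_inpainting",
        segmentation_model=segmentation_model,
        segmentation_processor=processor,
    ).to("cuda")

    image = PIL.Image.open("dog_on_bench.png")  # hypothetical input image
    result = pipe(image=image, text="a glass", prompt="a cup")  # mask "a glass", paint in "a cup"
    result.images[0].save("cup_on_bench.png")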
| 709
|
"""simple docstring"""
def A_ ( __UpperCamelCase : str , __UpperCamelCase : str ):
lowercase = len(__UpperCamelCase )
lowercase = []
for i in range(len(__UpperCamelCase ) - pat_len + 1 ):
lowercase = True
for j in range(__UpperCamelCase ):
if s[i + j] != pattern[j]:
lowercase = False
break
if match_found:
position.append(__UpperCamelCase )
return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
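
# Hedged cross-check of naive_pattern_search above against str.find; the
# helper name reference_positions is mine, not from the source.
def reference_positions(s: str, pattern: str) -> list:
    out, start = [], s.find(pattern)
    while start != -1:
        out.append(start)
        start = s.find(pattern, start + 1)
    return out


if __name__ == "__main__":
    text = "ABAAABCDBBABCDDEBCABC"
    assert reference_positions(text, "ABC") == naive_pattern_search(text, "ABC")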
| 396
| 0
|
"""simple docstring"""
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
_lowerCAmelCase : str = re.compile(R'''\s+''')
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
return {"hash": hashlib.mda(re.sub(_lowerCamelCase , "" , example["content"] ).encode("utf-8" ) ).hexdigest()}
def line_stats(example):
    """Calculates mean and max line length of file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculates the fraction of alphanumeric characters in the file."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Check if current hash is still in set of unique hashes and remove if true."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if file is autogenerated by looking at the first few lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if file is a configuration file or a unit test by:
    1. looking for keywords in the first few lines of the file;
    2. counting occurrences of the words 'config' and 'test' relative to the number of lines.
    """
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def has_no_keywords(example):
    """Check if a python file has none of the keywords for: function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if file uses the symbol '=' less than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}


def preprocess(example):
    """Chain all preprocessing steps into one function to not fill cache."""
    results = dict()
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Filter dataset with heuristics; note that this intentionally shadows the builtin `filter`."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with g-zip and remove the original."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
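
# Hedged, self-contained illustration of three of the per-example heuristics
# above on a toy dataset row (no tokenizer or CLI arguments are needed for
# these particular checks).
toy = {"content": "def add(a, b):\n    return a + b\n"}
print(line_stats(toy))       # {'line_mean': 15.0, 'line_max': 16}
print(alpha_stats(toy))      # fraction of alphanumeric characters
print(has_no_keywords(toy))  # {'has_no_keywords': False}: the file defines a function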
| 46
|
"""simple docstring"""
import math
def lowercase ( lowerCAmelCase__ : int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowercase ( lowerCAmelCase__ : float = 0.1 ) -> int:
__a = 3
__a = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(lowerCAmelCase__ )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
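
# Hedged usage sketch: solution() searches for the spiral side length at which
# the diagonal prime ratio first drops below `ratio`. A loose ratio keeps the
# demo fast; 0.5 is an illustrative choice.
print(solution(0.5))  # 11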
| 695
| 0
|
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel


class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def _lowerCamelCase ( self ):
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def _lowerCamelCase ( self , __magic_name__ ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = ids_tensor([1, 1_0] , config.vocab_size )
_lowerCAmelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
_lowerCAmelCase = OpenLlamaModel(__magic_name__ )
original_model.to(__magic_name__ )
original_model.eval()
_lowerCAmelCase = original_model(__magic_name__ ).last_hidden_state
_lowerCAmelCase = original_model(__magic_name__ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
_lowerCAmelCase = {'type': scaling_type, 'factor': 10.0}
_lowerCAmelCase = OpenLlamaModel(__magic_name__ )
scaled_model.to(__magic_name__ )
scaled_model.eval()
_lowerCAmelCase = scaled_model(__magic_name__ ).last_hidden_state
_lowerCAmelCase = scaled_model(__magic_name__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(__magic_name__ , __magic_name__ , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__magic_name__ , __magic_name__ , atol=1e-5 ) )
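# --- Added illustration (not part of the original test file): a minimal sketch of what
# "linear" RoPE scaling does, assuming the standard rotary-embedding formulation. The
# function name and arguments below are illustrative, not the model's actual API.
import torch

def linear_scaled_rope_angles(seq_len: int, dim: int, base: float = 10000.0,
                              scaling_factor: float = 1.0) -> torch.Tensor:
    # Linear scaling divides positions by the factor before computing rotation angles,
    # stretching the usable context window; a factor of 1.0 reduces to vanilla RoPE.
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))
    positions = torch.arange(seq_len, dtype=torch.float32) / scaling_factor
    return torch.outer(positions, inv_freq)

# Dynamic scaling only changes the embeddings past the original maximum length, which is
# why the test above expects identical short-input outputs for "dynamic" but not "linear".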
| 717
|
"""simple docstring"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a__ : Optional[Any] = {
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
a__ : List[Any] = logging.get_logger(__name__)
class __magic_name__ ( _UpperCamelCase ):
model_type = "mask2former"
backbones_supported = ["swin"]
attribute_map = {"hidden_size": "hidden_dim"}
def __init__( self , __magic_name__ = None , __magic_name__ = 2_5_6 , __magic_name__ = 2_5_6 , __magic_name__ = 2_5_6 , __magic_name__ = 1_0_2_4 , __magic_name__ = "relu" , __magic_name__ = 6 , __magic_name__ = 1_0 , __magic_name__ = 8 , __magic_name__ = 0.0 , __magic_name__ = 2_0_4_8 , __magic_name__ = False , __magic_name__ = False , __magic_name__ = 4 , __magic_name__ = 2_5_5 , __magic_name__ = 1_0_0 , __magic_name__ = 0.1 , __magic_name__ = 2.0 , __magic_name__ = 5.0 , __magic_name__ = 5.0 , __magic_name__ = 1_2_5_4_4 , __magic_name__ = 3.0 , __magic_name__ = 0.75 , __magic_name__ = 0.02 , __magic_name__ = 1.0 , __magic_name__ = True , __magic_name__ = [4, 8, 1_6, 3_2] , __magic_name__ = None , **__magic_name__ , ):
"""simple docstring"""
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' )
_lowerCAmelCase = CONFIG_MAPPING['swin'](
image_size=2_2_4 , in_channels=3 , patch_size=4 , embed_dim=9_6 , depths=[2, 2, 1_8, 2] , num_heads=[3, 6, 1_2, 2_4] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=__magic_name__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(__magic_name__ , __magic_name__ ):
_lowerCAmelCase = backbone_config.pop('model_type' )
_lowerCAmelCase = CONFIG_MAPPING[backbone_model_type]
_lowerCAmelCase = config_class.from_dict(__magic_name__ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '''
F'''Supported model types: {",".join(self.backbones_supported )}''' )
_lowerCAmelCase = backbone_config
_lowerCAmelCase = feature_size
_lowerCAmelCase = mask_feature_size
_lowerCAmelCase = hidden_dim
_lowerCAmelCase = encoder_feedforward_dim
_lowerCAmelCase = activation_function
_lowerCAmelCase = encoder_layers
_lowerCAmelCase = decoder_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = dropout
_lowerCAmelCase = dim_feedforward
_lowerCAmelCase = pre_norm
_lowerCAmelCase = enforce_input_projection
_lowerCAmelCase = common_stride
_lowerCAmelCase = ignore_value
_lowerCAmelCase = num_queries
_lowerCAmelCase = no_object_weight
_lowerCAmelCase = class_weight
_lowerCAmelCase = mask_weight
_lowerCAmelCase = dice_weight
_lowerCAmelCase = train_num_points
_lowerCAmelCase = oversample_ratio
_lowerCAmelCase = importance_sample_ratio
_lowerCAmelCase = init_std
_lowerCAmelCase = init_xavier_std
_lowerCAmelCase = use_auxiliary_loss
_lowerCAmelCase = feature_strides
_lowerCAmelCase = output_auxiliary_logits
_lowerCAmelCase = decoder_layers
super().__init__(**__magic_name__ )
@classmethod
def _lowerCamelCase ( cls , __magic_name__ , **__magic_name__ ):
"""simple docstring"""
return cls(
backbone_config=__magic_name__ , **__magic_name__ , )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = copy.deepcopy(self.__dict__ )
_lowerCAmelCase = self.backbone_config.to_dict()
_lowerCAmelCase = self.__class__.model_type
return output
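# --- Added usage sketch (hedged): instantiating the config above and round-tripping it
# through `to_dict`; assumes the class is exported from `transformers` as Mask2FormerConfig.
# config = Mask2FormerConfig()                      # falls back to the default Swin backbone
# assert config.backbone_config.model_type == "swin"
# restored = Mask2FormerConfig.from_dict(config.to_dict())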
| 309
| 0
|
"""simple docstring"""
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    # u * (u - 1) * (u - 2) ... (u - p + 1), the product term in Newton's forward formula
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
def main() -> None:
    n = int(input('enter the numbers of values: '))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0
    print('enter the values of parameters in a list: ')
    x = list(map(int, input().split()))
    print('enter the values of corresponding parameters: ')
    for i in range(n):
        y[i][0] = float(input())
    value = int(input('enter the value to interpolate: '))
    u = (value - x[0]) / (x[1] - x[0])
    # build the forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f'the value at {value} is {summ}')
if __name__ == "__main__":
main()
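# --- Added example (hedged): the same forward-difference interpolation as a pure function,
# reusing `ucal` and the `math` import above, so it can be tested without interactive input.
def newton_forward(x: list[float], y0: list[float], value: float) -> float:
    n = len(x)
    table = [[0.0] * n for _ in range(n)]
    for i, y_val in enumerate(y0):
        table[i][0] = y_val
    # forward difference table
    for i in range(1, n):
        for j in range(n - i):
            table[j][i] = table[j + 1][i - 1] - table[j][i - 1]
    u = (value - x[0]) / (x[1] - x[0])
    summ = table[0][0]
    for i in range(1, n):
        summ += ucal(u, i) * table[0][i] / math.factorial(i)
    return summ

# newton_forward([0, 1, 2, 3], [1, 2, 4, 8], 1.5) ~= 2.81, close to 2**1.5 ~= 2.83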
| 450
|
"""simple docstring"""
def cocktail_shaker_sort(unsorted: list) -> list:
    # bidirectional bubble sort: one forward and one backward pass per iteration
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCamelCase : int = input('''Enter numbers separated by a comma:\n''').strip()
__UpperCamelCase : Optional[Any] = [int(item) for item in user_input.split(''',''')]
print(F'''{cocktail_shaker_sort(unsorted) = }''')
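# --- Added sanity check: cocktail shaker sort is a bidirectional bubble sort, O(n^2) in
# the worst case, but the `swapped` flag lets it terminate early on already-sorted input.
assert cocktail_shaker_sort([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]
assert cocktail_shaker_sort([]) == []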
| 450
| 1
|
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
lowercase_: Optional[int] = (
'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'
)
lowercase_: List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
def _lowercase ( ):
"""simple docstring"""
snake_case__ : Any = """https://pypi.org/pypi/diffusers/json"""
snake_case__ : Optional[int] = json.loads(request.urlopen(UpperCAmelCase_).read())["""releases"""].keys()
return sorted(UpperCAmelCase_ , key=lambda UpperCAmelCase_: version.Version(UpperCAmelCase_))
def _lowercase ( ):
"""simple docstring"""
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(UpperCAmelCase_)
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_)
snake_case__ : Union[str, Any] = Path(UpperCAmelCase_) / """__init__.py"""
if not init_path.exists():
init_path.touch()
def _lowercase ( UpperCAmelCase_):
"""simple docstring"""
init_hf_modules()
snake_case__ : List[str] = Path(UpperCAmelCase_) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent)
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_)
snake_case__ : int = dynamic_module_path / """__init__.py"""
if not init_path.exists():
init_path.touch()
def _lowercase ( UpperCAmelCase_):
"""simple docstring"""
with open(UpperCAmelCase_ , """r""" , encoding="""utf-8""") as f:
snake_case__ : List[str] = f.read()
# Imports of the form `import .xxx`
snake_case__ : str = re.findall(r"^\s*import\s+\.(\S+)\s*$" , UpperCAmelCase_ , flags=re.MULTILINE)
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import" , UpperCAmelCase_ , flags=re.MULTILINE)
# Unique-ify
return list(set(UpperCAmelCase_))
def _lowercase ( UpperCAmelCase_):
"""simple docstring"""
snake_case__ : List[Any] = False
snake_case__ : Optional[int] = [module_file]
snake_case__ : str = []
# Let's recurse through all relative imports
while not no_change:
snake_case__ : List[str] = []
for f in files_to_check:
new_imports.extend(get_relative_imports(UpperCAmelCase_))
snake_case__ : List[str] = Path(UpperCAmelCase_).parent
snake_case__ : List[Any] = [str(module_path / m) for m in new_imports]
snake_case__ : Union[str, Any] = [f for f in new_import_files if f not in all_relative_imports]
snake_case__ : Optional[int] = [F'{f}.py' for f in new_import_files]
snake_case__ : int = len(UpperCAmelCase_) == 0
all_relative_imports.extend(UpperCAmelCase_)
return all_relative_imports
def _lowercase ( UpperCAmelCase_):
"""simple docstring"""
with open(UpperCAmelCase_ , """r""" , encoding="""utf-8""") as f:
snake_case__ : Tuple = f.read()
# Imports of the form `import xxx`
snake_case__ : Union[str, Any] = re.findall(r"^\s*import\s+(\S+)\s*$" , UpperCAmelCase_ , flags=re.MULTILINE)
# Imports of the form `from xxx import yyy`
imports += re.findall(r"^\s*from\s+(\S+)\s+import" , UpperCAmelCase_ , flags=re.MULTILINE)
# Only keep the top-level module
snake_case__ : str = [imp.split(""".""")[0] for imp in imports if not imp.startswith(""".""")]
# Unique-ify and test we got them all
snake_case__ : Optional[int] = list(set(UpperCAmelCase_))
snake_case__ : List[str] = []
for imp in imports:
try:
importlib.import_module(UpperCAmelCase_)
except ImportError:
missing_packages.append(UpperCAmelCase_)
if len(UpperCAmelCase_) > 0:
raise ImportError(
"""This modeling file requires the following packages that were not found in your environment: """
F'{", ".join(UpperCAmelCase_)}. Run `pip install {" ".join(UpperCAmelCase_)}`')
return get_relative_imports(UpperCAmelCase_)
def get_class_in_module(class_name, module_path):
    """simple docstring"""
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)
    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    """simple docstring"""
    from ..pipelines import DiffusionPipeline
    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))
    pipeline_class = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , UpperCAmelCase_)
and cls.__module__.split(""".""")[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'
F' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'
F' {loaded_module}.')
pipeline_class = cls
return pipeline_class
def _lowercase ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = False , UpperCAmelCase_ = False , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = False , ):
"""simple docstring"""
snake_case__ : str = str(UpperCAmelCase_)
snake_case__ : Union[str, Any] = os.path.join(UpperCAmelCase_ , UpperCAmelCase_)
if os.path.isfile(UpperCAmelCase_):
snake_case__ : Dict = module_file_or_url
snake_case__ : List[str] = """local"""
elif pretrained_model_name_or_path.count("""/""") == 0:
snake_case__ : List[str] = get_diffusers_versions()
# cut ".dev0"
snake_case__ : Optional[Any] = """v""" + """.""".join(__version__.split(""".""")[:3])
# retrieve github version that matches
if revision is None:
snake_case__ : List[Any] = latest_version if latest_version[1:] in available_versions else """main"""
logger.info(F'Defaulting to latest_version: {revision}.')
elif revision in available_versions:
snake_case__ : List[Any] = F'v{revision}'
elif revision == "main":
snake_case__ : str = revision
else:
raise ValueError(
F'`custom_revision`: {revision} does not exist. Please make sure to choose one of'
F' {", ".join(available_versions + ["main"])}.')
# community pipeline on GitHub
snake_case__ : Tuple = COMMUNITY_PIPELINES_URL.format(revision=UpperCAmelCase_ , pipeline=UpperCAmelCase_)
try:
snake_case__ : Union[str, Any] = cached_download(
UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , force_download=UpperCAmelCase_ , proxies=UpperCAmelCase_ , resume_download=UpperCAmelCase_ , local_files_only=UpperCAmelCase_ , use_auth_token=UpperCAmelCase_ , )
snake_case__ : List[str] = """git"""
snake_case__ : int = pretrained_model_name_or_path + """.py"""
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.')
raise
else:
try:
# Load from URL or cache if already cached
snake_case__ : Optional[Any] = hf_hub_download(
UpperCAmelCase_ , UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , force_download=UpperCAmelCase_ , proxies=UpperCAmelCase_ , resume_download=UpperCAmelCase_ , local_files_only=UpperCAmelCase_ , use_auth_token=UpperCAmelCase_ , )
snake_case__ : List[str] = os.path.join("""local""" , """--""".join(pretrained_model_name_or_path.split("""/""")))
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.')
raise
# Check we have all the requirements in our environment
snake_case__ : Optional[int] = check_imports(UpperCAmelCase_)
# Now we move the module inside our cached dynamic modules.
snake_case__ : List[Any] = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(UpperCAmelCase_)
snake_case__ : Dict = Path(UpperCAmelCase_) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(UpperCAmelCase_ , submodule_path / module_file)
for module_needed in modules_needed:
snake_case__ : Any = F'{module_needed}.py'
shutil.copy(os.path.join(UpperCAmelCase_ , UpperCAmelCase_) , submodule_path / module_needed)
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
snake_case__ : str = use_auth_token
elif use_auth_token is True:
snake_case__ : List[Any] = HfFolder.get_token()
else:
snake_case__ : str = None
snake_case__ : Dict = model_info(UpperCAmelCase_ , revision=UpperCAmelCase_ , token=UpperCAmelCase_).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
snake_case__ : Dict = submodule_path / commit_hash
snake_case__ : Union[str, Any] = full_submodule + os.path.sep + commit_hash
create_dynamic_module(UpperCAmelCase_)
if not (submodule_path / module_file).exists():
shutil.copy(UpperCAmelCase_ , submodule_path / module_file)
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
UpperCAmelCase_ , F'{module_needed}.py' , cache_dir=UpperCAmelCase_ , force_download=UpperCAmelCase_ , resume_download=UpperCAmelCase_ , proxies=UpperCAmelCase_ , use_auth_token=UpperCAmelCase_ , revision=UpperCAmelCase_ , local_files_only=UpperCAmelCase_ , )
return os.path.join(UpperCAmelCase_ , UpperCAmelCase_)
def _lowercase ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = False , UpperCAmelCase_ = False , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = False , **UpperCAmelCase_ , ):
"""simple docstring"""
snake_case__ : int = get_cached_module_file(
UpperCAmelCase_ , UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , force_download=UpperCAmelCase_ , resume_download=UpperCAmelCase_ , proxies=UpperCAmelCase_ , use_auth_token=UpperCAmelCase_ , revision=UpperCAmelCase_ , local_files_only=UpperCAmelCase_ , )
return get_class_in_module(UpperCAmelCase_ , final_module.replace(""".py""" , """"""))
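# --- Added usage sketch (hedged): this final helper corresponds to diffusers'
# `get_class_from_dynamic_module`, which resolves community pipelines; the pipeline
# name below is illustrative only.
# pipeline_cls = get_class_from_dynamic_module(
#     "lpw_stable_diffusion",        # community pipeline on GitHub, or a Hub repo id
#     "lpw_stable_diffusion.py",     # module file to fetch and cache
# )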
| 127
|
import re
import string
import numpy as np
import datasets
lowercase_: Optional[Any] = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
lowercase_: Optional[int] = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
lowercase_: str = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__ (datasets.Metric ):
"""simple docstring"""
def lowercase ( self : Tuple ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , reference_urls=[] , )
def lowercase (self, predictions, references, regexes_to_ignore=None, ignore_case=False, ignore_punctuation=False, ignore_numbers=False):
    if regexes_to_ignore is not None:
        for s in regexes_to_ignore:
            predictions = np.array([re.sub(s, "", x) for x in predictions])
            references = np.array([re.sub(s, "", x) for x in references])
    else:
        predictions = np.asarray(predictions)
        references = np.asarray(references)
    if ignore_case:
        predictions = np.char.lower(predictions)
        references = np.char.lower(references)
    if ignore_punctuation:
        repl_table = string.punctuation.maketrans("", "", string.punctuation)
        predictions = np.char.translate(predictions, table=repl_table)
        references = np.char.translate(references, table=repl_table)
    if ignore_numbers:
        repl_table = string.digits.maketrans("", "", string.digits)
        predictions = np.char.translate(predictions, table=repl_table)
        references = np.char.translate(references, table=repl_table)
    score_list = predictions == references
    return {"exact_match": np.mean(score_list) * 100}
| 127
| 1
|
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''',
},
'''spm_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_config_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/m2m100_418M''': 10_24,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
'''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''],
'''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de''']
}
class __a ( SCREAMING_SNAKE_CASE ):
vocab_files_names = VOCAB_FILES_NAMES
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
model_input_names = ["input_ids", "attention_mask"]
prefix_tokens: List[int] = []
suffix_tokens: List[int] = []
def __init__( self : int , snake_case_ : List[str] , snake_case_ : Any , snake_case_ : Union[str, Any]=None , snake_case_ : Tuple=None , snake_case_ : List[str]="<s>" , snake_case_ : List[str]="</s>" , snake_case_ : Optional[Any]="</s>" , snake_case_ : Dict="<pad>" , snake_case_ : Any="<unk>" , snake_case_ : Tuple="m2m100" , snake_case_ : Optional[Dict[str, Any]] = None , snake_case_ : Dict=8 , **snake_case_ : List[Any] , )-> None:
__lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs
__lowerCAmelCase =language_codes
__lowerCAmelCase =FAIRSEQ_LANGUAGE_CODES[language_codes]
__lowerCAmelCase ={lang_code: F"""__{lang_code}__""" for lang_code in fairseq_language_code}
__lowerCAmelCase =kwargs.get("""additional_special_tokens""" , [])
kwargs["additional_special_tokens"] += [
self.get_lang_token(snake_case_)
for lang_code in fairseq_language_code
if self.get_lang_token(snake_case_) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=snake_case_ , tgt_lang=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , language_codes=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=snake_case_ , **snake_case_ , )
__lowerCAmelCase =vocab_file
__lowerCAmelCase =load_json(snake_case_)
__lowerCAmelCase ={v: k for k, v in self.encoder.items()}
__lowerCAmelCase =spm_file
__lowerCAmelCase =load_spm(snake_case_ , self.sp_model_kwargs)
__lowerCAmelCase =len(self.encoder)
__lowerCAmelCase ={
self.get_lang_token(snake_case_): self.encoder_size + i for i, lang_code in enumerate(snake_case_)
}
__lowerCAmelCase ={lang_code: self.encoder_size + i for i, lang_code in enumerate(snake_case_)}
__lowerCAmelCase ={v: k for k, v in self.lang_token_to_id.items()}
__lowerCAmelCase =src_lang if src_lang is not None else """en"""
__lowerCAmelCase =tgt_lang
__lowerCAmelCase =self.get_lang_id(self._src_lang)
self.set_src_lang_special_tokens(self._src_lang)
__lowerCAmelCase =num_madeup_words
@property
def UpperCamelCase ( self : int)-> int:
return len(self.encoder) + len(self.lang_token_to_id)
@property
def UpperCamelCase ( self : Optional[Any])-> str:
return self._src_lang
@src_lang.setter
def UpperCamelCase ( self : List[Any] , snake_case_ : str)-> None:
__lowerCAmelCase =new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def UpperCamelCase ( self : str , snake_case_ : str)-> List[str]:
return self.sp_model.encode(snake_case_ , out_type=snake_case_)
def UpperCamelCase ( self : Optional[Any] , snake_case_ : List[str])-> str:
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(snake_case_ , self.encoder[self.unk_token])
def UpperCamelCase ( self : Dict , snake_case_ : int)-> str:
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(snake_case_ , self.unk_token)
def UpperCamelCase ( self : Union[str, Any] , snake_case_ : Tuple)-> str:
__lowerCAmelCase =[]
__lowerCAmelCase =""""""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(snake_case_) + token
__lowerCAmelCase =[]
else:
current_sub_tokens.append(snake_case_)
out_string += self.sp_model.decode(snake_case_)
return out_string.strip()
def UpperCamelCase ( self : Union[str, Any] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None , snake_case_ : bool = False)-> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_)
__lowerCAmelCase =[1] * len(self.prefix_tokens)
__lowerCAmelCase =[1] * len(self.suffix_tokens)
if token_ids_a is None:
return prefix_ones + ([0] * len(snake_case_)) + suffix_ones
return prefix_ones + ([0] * len(snake_case_)) + ([0] * len(snake_case_)) + suffix_ones
def UpperCamelCase ( self : Any , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None)-> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCamelCase ( self : Optional[Any])-> Dict:
__lowerCAmelCase ={self.convert_ids_to_tokens(snake_case_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self : Tuple)-> Dict:
__lowerCAmelCase =self.__dict__.copy()
__lowerCAmelCase =None
return state
def __setstate__( self : List[Any] , snake_case_ : Dict)-> None:
__lowerCAmelCase =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs"""):
__lowerCAmelCase ={}
__lowerCAmelCase =load_spm(self.spm_file , self.sp_model_kwargs)
def UpperCamelCase ( self : Union[str, Any] , snake_case_ : str , snake_case_ : Optional[str] = None)-> Tuple[str]:
__lowerCAmelCase =Path(snake_case_)
if not save_dir.is_dir():
raise OSError(F"""{save_directory} should be a directory""")
__lowerCAmelCase =save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
)
__lowerCAmelCase =save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
)
save_json(self.encoder , snake_case_)
if os.path.abspath(self.spm_file) != os.path.abspath(snake_case_) and os.path.isfile(self.spm_file):
copyfile(self.spm_file , snake_case_)
elif not os.path.isfile(self.spm_file):
with open(snake_case_ , """wb""") as fi:
__lowerCAmelCase =self.sp_model.serialized_model_proto()
fi.write(snake_case_)
return (str(snake_case_), str(snake_case_))
def UpperCamelCase ( self : str , snake_case_ : List[str] , snake_case_ : str = "en" , snake_case_ : Optional[List[str]] = None , snake_case_ : str = "ro" , **snake_case_ : int , )-> BatchEncoding:
__lowerCAmelCase =src_lang
__lowerCAmelCase =tgt_lang
self.set_src_lang_special_tokens(self.src_lang)
return super().prepare_seq2seq_batch(snake_case_ , snake_case_ , **snake_case_)
def UpperCamelCase ( self : Optional[int] , snake_case_ : int , snake_case_ : Optional[str] , snake_case_ : Optional[str] , **snake_case_ : List[str])-> List[Any]:
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""")
__lowerCAmelCase =src_lang
__lowerCAmelCase =self(snake_case_ , add_special_tokens=snake_case_ , **snake_case_)
__lowerCAmelCase =self.get_lang_id(snake_case_)
__lowerCAmelCase =tgt_lang_id
return inputs
def UpperCamelCase ( self : Optional[int])-> Union[str, Any]:
self.set_src_lang_special_tokens(self.src_lang)
def UpperCamelCase ( self : Dict)-> Dict:
self.set_tgt_lang_special_tokens(self.tgt_lang)
def UpperCamelCase ( self : Union[str, Any] , snake_case_ : str)-> None:
__lowerCAmelCase =self.get_lang_token(snake_case_)
__lowerCAmelCase =self.lang_token_to_id[lang_token]
__lowerCAmelCase =[self.cur_lang_id]
__lowerCAmelCase =[self.eos_token_id]
def UpperCamelCase ( self : str , snake_case_ : str)-> None:
__lowerCAmelCase =self.get_lang_token(snake_case_)
__lowerCAmelCase =self.lang_token_to_id[lang_token]
__lowerCAmelCase =[self.cur_lang_id]
__lowerCAmelCase =[self.eos_token_id]
def UpperCamelCase ( self : int , snake_case_ : str)-> str:
return self.lang_code_to_token[lang]
def UpperCamelCase ( self : List[str] , snake_case_ : str)-> int:
__lowerCAmelCase =self.get_lang_token(snake_case_)
return self.lang_token_to_id[lang_token]
def __lowerCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
__lowerCAmelCase =sentencepiece.SentencePieceProcessor(**__lowerCamelCase )
spm.Load(str(__lowerCamelCase ) )
return spm
def __lowerCAmelCase ( __lowerCamelCase : str ) -> Union[Dict, List]:
with open(__lowerCamelCase , """r""" ) as f:
return json.load(__lowerCamelCase )
def __lowerCAmelCase ( __lowerCamelCase : Any , __lowerCamelCase : str ) -> None:
with open(__lowerCamelCase , """w""" ) as f:
json.dump(__lowerCamelCase , __lowerCamelCase , indent=2 )
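# --- Added usage sketch (hedged): typical translation flow with the tokenizer above,
# assuming the public facebook/m2m100_418M checkpoint.
# tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
# batch = tokenizer("Hello world", return_tensors="pt")
# # pass forced_bos_token_id=tokenizer.get_lang_id("fr") at generation time to steer
# # the model toward French output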
| 354
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
),
'''distilbert-base-uncased-finetuned-sst-2-english''': (
'''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
),
}
class __a ( SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = "distilbert"
SCREAMING_SNAKE_CASE = {
"hidden_size": "dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
}
def __init__( self : Optional[Any] , snake_case_ : Tuple=3_05_22 , snake_case_ : Any=5_12 , snake_case_ : Dict=False , snake_case_ : Tuple=6 , snake_case_ : Union[str, Any]=12 , snake_case_ : List[str]=7_68 , snake_case_ : Optional[Any]=4 * 7_68 , snake_case_ : Optional[Any]=0.1 , snake_case_ : Dict=0.1 , snake_case_ : Union[str, Any]="gelu" , snake_case_ : Any=0.0_2 , snake_case_ : Optional[Any]=0.1 , snake_case_ : List[Any]=0.2 , snake_case_ : Optional[Any]=0 , **snake_case_ : List[Any] , )-> Tuple:
__lowerCAmelCase =vocab_size
__lowerCAmelCase =max_position_embeddings
__lowerCAmelCase =sinusoidal_pos_embds
__lowerCAmelCase =n_layers
__lowerCAmelCase =n_heads
__lowerCAmelCase =dim
__lowerCAmelCase =hidden_dim
__lowerCAmelCase =dropout
__lowerCAmelCase =attention_dropout
__lowerCAmelCase =activation
__lowerCAmelCase =initializer_range
__lowerCAmelCase =qa_dropout
__lowerCAmelCase =seq_classif_dropout
super().__init__(**snake_case_ , pad_token_id=snake_case_)
class __a ( SCREAMING_SNAKE_CASE ):
@property
def UpperCamelCase ( self : Union[str, Any])-> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
__lowerCAmelCase ={0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__lowerCAmelCase ={0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
])
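# --- Added illustration: for the default task the `inputs` property above yields
# OrderedDict([
#     ("input_ids",      {0: "batch", 1: "sequence"}),
#     ("attention_mask", {0: "batch", 1: "sequence"}),
# ])
# i.e. batch and sequence length are the dynamic ONNX axes.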
| 354
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    # Gaussian elimination with partial pivoting, solving matrix @ x = vector
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_points: list[int]) -> Callable[[int], int]:
    # fit the unique polynomial of degree len(y_points) - 1 through the points
    # (1, y_points[0]), (2, y_points[1]), ...
    size: int = len(y_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int
    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix, vector)
    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size))
    return interpolated_func
def question_function(variable: int) -> int:
    # the generating polynomial from Project Euler problem 101
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(F'{solution() = }')
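# --- Added note: Project Euler problem 101 asks for the sum of the first incorrect terms
# (FITs) of the optimum polynomials; for the generating polynomial above the expected
# result is 37076114526.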
| 101
|
"""simple docstring"""
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    # count, for every perimeter <= max_perimeter, how many right triangles with
    # integer sides have that perimeter
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1000) -> int:
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]
if __name__ == "__main__":
print(F'Perimeter {solution()} has maximum solutions')
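# --- Added note: for max_perimeter = 1000 the answer is 840, the perimeter with the most
# integer right-triangle solutions (Project Euler problem 39).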
| 101
| 1
|
"""simple docstring"""
from math import isqrt
def is_prime(number: int) -> bool:
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
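# --- Added note: the candidates 7, 19, 37, ... are consecutive cube differences,
# (n + 1)**3 - n**3 = 3*n**2 + 3*n + 1, produced by the prime_candidate += 6 * cube_index
# step, so the loop only primality-tests numbers of that form.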
| 93
|
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : str = logging.get_logger(__name__)
a_ : int = {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}
class __UpperCamelCase ( lowerCamelCase__ ):
model_type = 'mvp'
keys_to_ignore_at_inference = ['past_key_values']
attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self, lowerCAmelCase=50_267, lowerCAmelCase=1_024, lowerCAmelCase=12, lowerCAmelCase=4_096, lowerCAmelCase=16, lowerCAmelCase=12, lowerCAmelCase=4_096, lowerCAmelCase=16, lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase="gelu", lowerCAmelCase=1_024, lowerCAmelCase=0.1, lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase=0.0_2, lowerCAmelCase=0.0, lowerCAmelCase=False, lowerCAmelCase=True, lowerCAmelCase=1, lowerCAmelCase=0, lowerCAmelCase=2, lowerCAmelCase=True, lowerCAmelCase=2, lowerCAmelCase=2, lowerCAmelCase=False, lowerCAmelCase=100, lowerCAmelCase=800, **lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =vocab_size
lowerCamelCase_ =max_position_embeddings
lowerCamelCase_ =d_model
lowerCamelCase_ =encoder_ffn_dim
lowerCamelCase_ =encoder_layers
lowerCamelCase_ =encoder_attention_heads
lowerCamelCase_ =decoder_ffn_dim
lowerCamelCase_ =decoder_layers
lowerCamelCase_ =decoder_attention_heads
lowerCamelCase_ =dropout
lowerCamelCase_ =attention_dropout
lowerCamelCase_ =activation_dropout
lowerCamelCase_ =activation_function
lowerCamelCase_ =init_std
lowerCamelCase_ =encoder_layerdrop
lowerCamelCase_ =decoder_layerdrop
lowerCamelCase_ =classifier_dropout
lowerCamelCase_ =use_cache
lowerCamelCase_ =encoder_layers
lowerCamelCase_ =scale_embedding # scale factor will be sqrt(d_model) if True
lowerCamelCase_ =use_prompt
lowerCamelCase_ =prompt_length
lowerCamelCase_ =prompt_mid_dim
super().__init__(
pad_token_id=lowerCAmelCase, bos_token_id=lowerCAmelCase, eos_token_id=lowerCAmelCase, is_encoder_decoder=lowerCAmelCase, decoder_start_token_id=lowerCAmelCase, forced_eos_token_id=lowerCAmelCase, **lowerCAmelCase, )
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''', lowerCAmelCase ):
lowerCamelCase_ =self.bos_token_id
warnings.warn(
f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
'''The config can simply be saved and uploaded again to be fixed.''' )
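# --- Added usage sketch (hedged): enabling MVP's prompt tuning via the config above,
# assuming the class is exported as MvpConfig.
# config = MvpConfig(use_prompt=True, prompt_length=100, prompt_mid_dim=800)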
| 676
| 0
|
'''simple docstring'''
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    if not is_tqdm_available():
        raise ImportError('Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.')
    disable = False
    if main_process_only:
        # only the local main process should render the progress bar
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
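# --- Added usage note (hedged): the first positional argument is `main_process_only`,
# so the iterable comes second.
# for step in tqdm(True, range(100), desc="train"):
#     ...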
| 720
|
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Union[str, Any]:
A : Dict =tempfile.mkdtemp()
A : int =SamImageProcessor()
A : Union[str, Any] =SamProcessor(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).image_processor
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Any:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Optional[int]:
A : str =[np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
A : Optional[int] =[Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Tuple:
A : Optional[int] =SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A : str =self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
A : Union[str, Any] =SamProcessor.from_pretrained(self.tmpdirname , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Optional[int]:
A : Optional[Any] =self.get_image_processor()
A : Optional[Any] =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : Dict =self.prepare_image_inputs()
A : Optional[int] =image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='np' )
A : Optional[Any] =processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Any:
A : str =self.get_image_processor()
A : Union[str, Any] =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : str =[torch.ones((1, 3, 5, 5) )]
A : Optional[Any] =[[17_64, 26_46]]
A : List[Any] =[[6_83, 10_24]]
A : Union[str, Any] =processor.post_process_masks(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A : Any =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , torch.tensor(SCREAMING_SNAKE_CASE__ ) , torch.tensor(SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
A : str =[np.ones((1, 3, 5, 5) )]
A : int =processor.post_process_masks(SCREAMING_SNAKE_CASE__ , np.array(SCREAMING_SNAKE_CASE__ ) , np.array(SCREAMING_SNAKE_CASE__ ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A : Any =[[1, 0], [0, 1]]
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
A : Any =processor.post_process_masks(SCREAMING_SNAKE_CASE__ , np.array(SCREAMING_SNAKE_CASE__ ) , np.array(SCREAMING_SNAKE_CASE__ ) )
@require_vision
@require_tf
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : str ) -> str:
A : Tuple =tempfile.mkdtemp()
A : Union[str, Any] =SamImageProcessor()
A : Union[str, Any] =SamProcessor(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : int , **SCREAMING_SNAKE_CASE__ : str ) -> Union[str, Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).image_processor
def SCREAMING_SNAKE_CASE_ ( self : str ) -> List[str]:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Tuple:
A : Optional[Any] =[np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
A : Any =[Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> List[str]:
A : Optional[Any] =SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A : Optional[Any] =self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
A : Dict =SamProcessor.from_pretrained(self.tmpdirname , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Any:
A : Any =self.get_image_processor()
A : Any =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : int =self.prepare_image_inputs()
A : Tuple =image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='np' )
A : List[Any] =processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Tuple:
A : int =self.get_image_processor()
A : Any =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : Optional[int] =[tf.ones((1, 3, 5, 5) )]
A : Tuple =[[17_64, 26_46]]
A : Union[str, Any] =[[6_83, 10_24]]
A : int =processor.post_process_masks(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A : List[Any] =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ ) , tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ ) , return_tensors='tf' , )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
A : Any =[np.ones((1, 3, 5, 5) )]
A : Optional[Any] =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , np.array(SCREAMING_SNAKE_CASE__ ) , np.array(SCREAMING_SNAKE_CASE__ ) , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
A : Any =[[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
A : List[str] =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , np.array(SCREAMING_SNAKE_CASE__ ) , np.array(SCREAMING_SNAKE_CASE__ ) , return_tensors='tf' )
@require_vision
@require_torchvision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Union[str, Any]:
A : Optional[int] =tempfile.mkdtemp()
A : Union[str, Any] =SamImageProcessor()
A : Dict =SamProcessor(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : int , **SCREAMING_SNAKE_CASE__ : List[str] ) -> Any:
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).image_processor
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Any:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Tuple:
A : Any =[np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
A : Tuple =[Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> List[str]:
A : Optional[Any] =self.get_image_processor()
A : Dict =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : Optional[int] =np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.float32 )
A : Optional[int] =[tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )]
A : Union[str, Any] =[torch.tensor(SCREAMING_SNAKE_CASE__ )]
A : int =[[17_64, 26_46]]
A : int =[[6_83, 10_24]]
A : Dict =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors='tf' )
A : Optional[Any] =processor.post_process_masks(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Any:
A : Union[str, Any] =self.get_image_processor()
A : int =SamProcessor(image_processor=SCREAMING_SNAKE_CASE__ )
A : int =self.prepare_image_inputs()
A : List[Any] =image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='pt' )['pixel_values'].numpy()
A : Tuple =processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )['pixel_values'].numpy()
A : Optional[int] =image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='tf' )['pixel_values'].numpy()
A : Dict =processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='tf' )['pixel_values'].numpy()
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
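# --- Added note (hedged): post_process_masks upsamples low-resolution mask logits back to
# each image's original (height, width); the tests above only assert numerical parity of
# the NumPy / PyTorch / TensorFlow paths, not mask quality.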
| 661
| 0
|
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # fill each value of the kernel
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degrees to radians
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # rotate coordinates by theta
            _x = cos_theta * px + sin_theta * py
            _y = -sin_theta * px + cos_theta * py

            # Gaussian envelope times a cosine carrier
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image into gray scale values
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple kernels to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
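# --- Added note: in the kernel above, theta rotates the filter's stripes, lambd sets
# their wavelength, psi their phase offset, gamma the aspect ratio of the Gaussian
# envelope, and sigma its width.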
| 92
|
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
__lowercase : str = logging.getLogger(__name__)
class __UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self , __a=-1 ):
'''simple docstring'''
__a : Tuple = label_idx
def __UpperCAmelCase ( self , __a , __a ):
'''simple docstring'''
if isinstance(__a , __a ):
__a : Any = mode.value
__a : List[Any] = os.path.join(__a , f"""{mode}.txt""" )
__a : Optional[Any] = 1
__a : str = []
with open(__a , encoding='utf-8' ) as f:
__a : Tuple = []
__a : Dict = []
for line in f:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=__a , labels=__a ) )
guid_index += 1
__a : str = []
__a : int = []
else:
__a : Optional[int] = line.split(' ' )
words.append(splits[0] )
if len(__a ) > 1:
labels.append(splits[self.label_idx].replace('\n' , '' ) )
else:
# Examples could have no label for mode = "test"
labels.append('O' )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=__a , labels=__a ) )
return examples
def __UpperCAmelCase ( self , __a , __a , __a ):
'''simple docstring'''
__a : List[str] = 0
for line in test_input_reader:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
writer.write(__a )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
__a : Tuple = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
writer.write(__a )
else:
logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
if path:
with open(__a , 'r' ) as f:
__a : Any = f.read().splitlines()
if "O" not in labels:
__a : Optional[int] = ['O'] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class __UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self ):
'''simple docstring'''
super().__init__(label_idx=-2 )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
if path:
with open(__a , 'r' ) as f:
__a : Any = f.read().splitlines()
if "O" not in labels:
__a : List[Any] = ['O'] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self , __a , __a ):
'''simple docstring'''
if isinstance(__a , __a ):
__a : Dict = mode.value
__a : List[str] = os.path.join(__a , f"""{mode}.txt""" )
__a : Tuple = 1
__a : List[str] = []
with open(__a , encoding='utf-8' ) as f:
for sentence in parse_incr(__a ):
__a : Any = []
__a : Optional[int] = []
for token in sentence:
words.append(token['form'] )
labels.append(token['upos'] )
assert len(__a ) == len(__a )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=__a , labels=__a ) )
guid_index += 1
return examples
def __UpperCAmelCase ( self , __a , __a , __a ):
'''simple docstring'''
__a : Tuple = 0
for sentence in parse_incr(__a ):
__a : int = preds_list[example_id]
__a : str = ''
for token in sentence:
out += f"""{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) """
out += "\n"
writer.write(__a )
example_id += 1
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
if path:
with open(__a , 'r' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
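# A minimal stand-alone sketch of the CoNLL parsing loop the readers above share:
# blank lines and -DOCSTART- markers delimit sentences, the first column is the
# token, and `label_idx` selects the label column (-1 for NER, -2 for chunking).
# Names here are illustrative, not part of the original utilities.
def parse_conll(lines, label_idx=-1):
    sentences, words, labels = [], [], []
    for line in lines:
        if line.startswith("-DOCSTART-") or line.strip() == "":
            if words:
                sentences.append((words, labels))
                words, labels = [], []
        else:
            splits = line.split(" ")
            words.append(splits[0])
            # test files may carry no label column; fall back to "O"
            labels.append(splits[label_idx].strip() if len(splits) > 1 else "O")
    if words:
        sentences.append((words, labels))
    return sentences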
| 476
| 0
|
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
a_ :int = 1.0_5_4_5_7_1_8_1_7e-3_4 # unit of ℏ : J * s
a_ :Union[str, Any] = 3e8 # unit of c : m * s^-1
def lowercase_ (force : float , area : float , distance : float ):
if (force, area, distance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if force < 0:
raise ValueError('Magnitude of force can not be negative' )
if distance < 0:
raise ValueError('Distance can not be negative' )
if area < 0:
raise ValueError('Area can not be negative' )
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            2_4_0 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (2_4_0 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (2_4_0 * force)
        ) ** (1 / 4)
        return {"distance": distance}
raise ValueError('One and only one argument must be 0' )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
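# Worked example (a sketch; the plate area and separation are illustrative
# assumptions): for two plates of area 4e-4 m^2 separated by 1e-6 m, the force
# branch evaluates F = (hbar * c * pi^2 * A) / (240 * d^4), roughly 5.2e-7 N.
if __name__ == "__main__":
    print(lowercase_(force=0, area=4e-4, distance=1e-6))  # ~ {'force': 5.2e-07}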
| 721
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class snake_case__ ( lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = KandinskyInpaintPipeline
_SCREAMING_SNAKE_CASE = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
_SCREAMING_SNAKE_CASE = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
_SCREAMING_SNAKE_CASE = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
_SCREAMING_SNAKE_CASE = False
@property
def lowercase_ ( self : Optional[Any] ) ->Optional[Any]:
return 3_2
@property
def lowercase_ ( self : int ) ->str:
return 3_2
@property
def lowercase_ ( self : Any ) ->List[str]:
return self.time_input_dim
@property
def lowercase_ ( self : Optional[Any] ) ->str:
return self.time_input_dim * 4
@property
def lowercase_ ( self : Tuple ) ->int:
return 1_0_0
@property
def lowercase_ ( self : str ) ->Dict:
snake_case__ : Union[str, Any] = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def lowercase_ ( self : Any ) ->Optional[int]:
torch.manual_seed(0 )
snake_case__ : str = MCLIPConfig(
numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=3_7, num_attention_heads=4, num_hidden_layers=5, vocab_size=1_0_0_5, )
snake_case__ : Optional[Any] = MultilingualCLIP(_snake_case )
snake_case__ : List[Any] = text_encoder.eval()
return text_encoder
@property
def lowercase_ ( self : Tuple ) ->Optional[int]:
torch.manual_seed(0 )
snake_case__ : Optional[Any] = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
snake_case__ : Dict = UNetaDConditionModel(**_snake_case )
return model
@property
def lowercase_ ( self : Dict ) ->Optional[int]:
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowercase_ ( self : Union[str, Any] ) ->List[Any]:
torch.manual_seed(0 )
snake_case__ : Optional[int] = VQModel(**self.dummy_movq_kwargs )
return model
def lowercase_ ( self : Any ) ->Any:
snake_case__ : int = self.dummy_text_encoder
snake_case__ : str = self.dummy_tokenizer
snake_case__ : Any = self.dummy_unet
snake_case__ : Tuple = self.dummy_movq
snake_case__ : int = DDIMScheduler(
num_train_timesteps=1_0_0_0, beta_schedule='linear', beta_start=0.0_0_0_8_5, beta_end=0.0_1_2, clip_sample=_snake_case, set_alpha_to_one=_snake_case, steps_offset=1, prediction_type='epsilon', thresholding=_snake_case, )
snake_case__ : Optional[int] = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def lowercase_ ( self : str, _snake_case : Any, _snake_case : int=0 ) ->str:
snake_case__ : Union[str, Any] = floats_tensor((1, self.cross_attention_dim), rng=random.Random(_snake_case ) ).to(_snake_case )
snake_case__ : str = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1 ) ).to(_snake_case )
# create init_image
snake_case__ : Tuple = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(_snake_case ) ).to(_snake_case )
snake_case__ : Optional[Any] = image.cpu().permute(0, 2, 3, 1 )[0]
snake_case__ : Tuple = Image.fromarray(np.uinta(_snake_case ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
# create mask
snake_case__ : Any = np.ones((6_4, 6_4), dtype=np.floataa )
snake_case__ : Optional[Any] = 0
if str(_snake_case ).startswith('mps' ):
snake_case__ : Union[str, Any] = torch.manual_seed(_snake_case )
else:
snake_case__ : Any = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
snake_case__ : int = {
'prompt': 'horse',
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def lowercase_ ( self : Optional[int] ) ->Optional[Any]:
snake_case__ : int = 'cpu'
snake_case__ : str = self.get_dummy_components()
snake_case__ : Any = self.pipeline_class(**_snake_case )
snake_case__ : Optional[Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
snake_case__ : Tuple = pipe(**self.get_dummy_inputs(_snake_case ) )
snake_case__ : List[Any] = output.images
snake_case__ : List[Any] = pipe(
**self.get_dummy_inputs(_snake_case ), return_dict=_snake_case, )[0]
snake_case__ : Optional[int] = image[0, -3:, -3:, -1]
snake_case__ : int = image_from_tuple[0, -3:, -3:, -1]
print(F'''image.shape {image.shape}''' )
assert image.shape == (1, 6_4, 6_4, 3)
snake_case__ : Any = np.array(
[0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def lowercase_ ( self : Any ) ->List[Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : Dict ) ->int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : Optional[int] ) ->List[str]:
snake_case__ : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' )
snake_case__ : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
snake_case__ : Union[str, Any] = np.ones((7_6_8, 7_6_8), dtype=np.floataa )
snake_case__ : str = 0
snake_case__ : List[str] = 'a hat'
snake_case__ : Any = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior', torch_dtype=torch.floataa )
pipe_prior.to(_snake_case )
snake_case__ : Union[str, Any] = KandinskyInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-inpaint', torch_dtype=torch.floataa )
snake_case__ : Tuple = pipeline.to(_snake_case )
pipeline.set_progress_bar_config(disable=_snake_case )
snake_case__ : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
snake_case__ , snake_case__ : Tuple = pipe_prior(
_snake_case, generator=_snake_case, num_inference_steps=5, negative_prompt='', ).to_tuple()
snake_case__ : Optional[Any] = pipeline(
_snake_case, image=_snake_case, mask_image=_snake_case, image_embeds=_snake_case, negative_image_embeds=_snake_case, generator=_snake_case, num_inference_steps=1_0_0, height=7_6_8, width=7_6_8, output_type='np', )
snake_case__ : Dict = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(_snake_case, _snake_case )
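# Flow exercised by the slow test above: KandinskyPriorPipeline maps the text
# prompt "a hat" to (image_embeds, negative_image_embeds); those embeddings,
# together with the cat init image and the 768x768 mask, are fed into
# KandinskyInpaintPipeline, and the result is compared pixel-wise against a
# reference numpy image.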
| 243
| 0
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = """Create a default config file for Accelerate with only a few flags set."""
def write_basic_config (mixed_precision="no", save_location = default_json_config_file, use_xpu = False ) -> str:
    """simple docstring"""
    path = Path(save_location )
    path.parent.mkdir(parents=True, exist_ok=True )
if path.exists():
print(
f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`." )
return False
    mixed_precision = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}" )
    config = {
        """compute_environment""": """LOCAL_MACHINE""",
        """mixed_precision""": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["""num_processes"""] = num_gpus
        config["""use_cpu"""] = False
        if num_gpus > 1:
            config["""distributed_type"""] = """MULTI_GPU"""
        else:
            config["""distributed_type"""] = """NO"""
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["""num_processes"""] = num_xpus
        config["""use_cpu"""] = False
        if num_xpus > 1:
            config["""distributed_type"""] = """MULTI_XPU"""
        else:
            config["""distributed_type"""] = """NO"""
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["""num_processes"""] = num_npus
        config["""use_cpu"""] = False
        if num_npus > 1:
            config["""distributed_type"""] = """MULTI_NPU"""
        else:
            config["""distributed_type"""] = """NO"""
    else:
        num_gpus = 0
        config["""use_cpu"""] = True
        config["""num_processes"""] = 1
        config["""distributed_type"""] = """NO"""
    config = ClusterConfig(**config )
    config.to_json_file(path )
    return path
def default_command_parser (parser, parents ):
    """simple docstring"""
    parser = parser.add_parser("""default""", parents=parents, help=description, formatter_class=SubcommandHelpFormatter )
    parser.add_argument(
        """--config_file""", default=default_json_config_file, help=(
            """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
            """location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
            """such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
            """with 'huggingface'."""
        ), dest="""save_location""", )
    parser.add_argument(
        """--mixed_precision""", choices=["""no""", """fp16""", """bf16"""], type=str, help="""Whether or not to use mixed precision training. """
        """Choose between FP16 and BF16 (bfloat16) training. """
        """BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.""", default="""no""", )
    parser.set_defaults(func=default_config_command )
    return parser
def default_config_command (args ):
    """simple docstring"""
    config_file = write_basic_config(args.mixed_precision, args.save_location )
    if config_file:
        print(f"accelerate configuration saved at {config_file}" )
| 150
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase__ :List[Any] = {
"""configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
"""tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ :List[str] = ["""ConvBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ :Dict = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ :List[str] = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ :Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
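# The module above registers names in _import_structure and only materializes
# them on attribute access. A minimal stand-alone sketch of that lazy-import
# idea (illustrative, not the actual _LazyModule implementation):
import importlib
import types

class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, item):
        # import the submodule only when one of its names is first requested
        module = importlib.import_module("." + self._class_to_module[item], self.__name__)
        return getattr(module, item)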
| 150
| 1
|
from __future__ import annotations
import math
def minimax( depth , node_index , is_max , scores , height ):
    if depth < 0:
        raise ValueError('Depth cannot be less than 0' )
    if len(scores ) == 0:
        raise ValueError('Scores cannot be empty' )
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
    return min(
        minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
def main( ):
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores ) , 2 )
    print('Optimal value : ' , end='' )
    print(minimax(0 , 0 , True , scores , height ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
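# Worked trace for the sample tree above:
# leaves:       90 23 | 6 33 | 21 65 | 123 34423
# depth 2, max: 90      33     65      34423
# depth 1, min: 33             65
# depth 0, max: 65   -> prints "Optimal value : 65"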
| 642
|
import math
import sys
def read_file_binary( file_path ):
    result = ''
    try:
        with open(file_path , 'rb' ) as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = F'''{dat:08b}'''
            result += curr_byte
        return result
    except OSError:
        print('File not accessible' )
        sys.exit()
def decompress_data( data_bits ):
    lexicon = {'0': '0', '1': '1'}
    result, curr_string = '', ''
    index = len(lexicon )
    for i in range(len(data_bits ) ):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + '0'
        if math.log2(index ).is_integer():
            new_lex = {}
            for curr_key in list(lexicon ):
                new_lex['0' + curr_key] = lexicon.pop(curr_key )
            lexicon = new_lex
        lexicon[bin(index )[2:]] = last_match_id + '1'
        index += 1
        curr_string = ''
    return result
def write_file_binary( file_path , to_write ):
    byte_length = 8
    try:
        with open(file_path , 'wb' ) as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0 , len(to_write ) , byte_length )
            ]
            if len(result_byte_array[-1] ) % byte_length == 0:
                result_byte_array.append('10000000' )
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1] ) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem , 2 ).to_bytes(1 , byteorder='big' ) )
    except OSError:
        print('File not accessible' )
        sys.exit()
def remove_prefix( data_bits ):
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress( source_path , destination_path ):
    data_bits = read_file_binary(source_path )
    data_bits = remove_prefix(data_bits )
    decompressed = decompress_data(data_bits )
    write_file_binary(destination_path , decompressed )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
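# A minimal sketch of the byte-padding rule used by write_file_binary above:
# the final chunk is completed to 8 bits with a single "1" followed by zeros,
# or a full "10000000" byte is appended when the data already ends on a byte
# boundary. Names here are illustrative.
def pad_to_byte(bits, byte_length=8):
    chunks = [bits[i : i + byte_length] for i in range(0, len(bits), byte_length)]
    if len(chunks[-1]) % byte_length == 0:
        chunks.append("10000000")
    else:
        chunks[-1] += "1" + "0" * (byte_length - len(chunks[-1]) - 1)
    return chunks

# pad_to_byte("1011010")  -> ["10110101"]
# pad_to_byte("10110101") -> ["10110101", "10000000"]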
| 642
| 1
|
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
lowercase = None
lowercase = logging.get_logger(__name__)
lowercase = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''',
},
}
# TODO(PVP) - this should be removed in Transformers v5
lowercase = {
'''t5-small''': 5_1_2,
'''t5-base''': 5_1_2,
'''t5-large''': 5_1_2,
'''t5-3b''': 5_1_2,
'''t5-11b''': 5_1_2,
}
class __A( UpperCAmelCase ):
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = ['''input_ids''', '''attention_mask''']
SCREAMING_SNAKE_CASE = TaTokenizer
SCREAMING_SNAKE_CASE = []
def __init__( self : List[str] , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Dict=None , __UpperCamelCase : Any="</s>" , __UpperCamelCase : Union[str, Any]="<unk>" , __UpperCamelCase : List[str]="<pad>" , __UpperCamelCase : Any=1_0_0 , __UpperCamelCase : List[str]=None , **__UpperCamelCase : Tuple , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
lowerCamelCase_ = [F'''<extra_id_{i}>''' for i in range(__UpperCamelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
lowerCamelCase_ = len(set(filter(lambda __UpperCamelCase : bool("""extra_id_""" in str(__UpperCamelCase ) ) , __UpperCamelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
""" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
""" tokens""" )
super().__init__(
__UpperCamelCase , tokenizer_file=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , extra_ids=__UpperCamelCase , additional_special_tokens=__UpperCamelCase , **__UpperCamelCase , )
lowerCamelCase_ = vocab_file
lowerCamelCase_ = False if not self.vocab_file else True
lowerCamelCase_ = extra_ids
@staticmethod
def lowercase__ ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] ):
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
lowerCamelCase_ = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"""This tokenizer was incorrectly instantiated with a model max length of"""
F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
""" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
""" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
F''' {pretrained_model_name_or_path} automatically truncating your input to'''
F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
""" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
""" instantiate this tokenizer with `model_max_length` set to your preferred value.""" , __UpperCamelCase , )
return max_model_length
def lowercase__ ( self : str , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(__UpperCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase_ = os.path.join(
__UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ):
copyfile(self.vocab_file , __UpperCamelCase )
logger.info(F'''Copy vocab file to {out_vocab_file}''' )
return (out_vocab_file,)
def lowercase__ ( self : List[Any] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ):
lowerCamelCase_ = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
lowerCamelCase_ = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def lowercase__ ( self : Dict , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ):
lowerCamelCase_ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def lowercase__ ( self : Any ):
return list(
set(filter(lambda __UpperCamelCase : bool(re.search(R"""<extra_id_\d+>""" , __UpperCamelCase ) ) is not None , self.additional_special_tokens ) ) )
def lowercase__ ( self : List[str] ):
return [self.convert_tokens_to_ids(__UpperCamelCase ) for token in self.get_sentinel_tokens()]
| 272
|
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
headers = {'''UserAgent''': UserAgent().random}
def extract_user_profile( script ) -> dict:
    data = script.contents[0]
    info = json.loads(data[data.find("""{\"config\"""" ) : -1] )
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    def __init__( self , username ):
        self.url = F'''https://www.instagram.com/{username}/'''
        self.user_data = self.get_json()
    def get_json( self ):
        html = requests.get(self.url , headers=headers ).text
        scripts = BeautifulSoup(html , """html.parser""" ).find_all("""script""" )
        try:
            return extract_user_profile(scripts[4] )
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3] )
    def __repr__( self ):
        return F'''{self.__class__.__name__}(\'{self.username}\')'''
    def __str__( self ):
        return F'''{self.fullname} ({self.username}) is {self.biography}'''
    @property
    def username( self ):
        return self.user_data["username"]
    @property
    def fullname( self ):
        return self.user_data["full_name"]
    @property
    def biography( self ):
        return self.user_data["biography"]
    @property
    def email( self ):
        return self.user_data["business_email"]
    @property
    def website( self ):
        return self.user_data["external_url"]
    @property
    def number_of_followers( self ):
        return self.user_data["edge_followed_by"]["count"]
    @property
    def number_of_followings( self ):
        return self.user_data["edge_follow"]["count"]
    @property
    def number_of_posts( self ):
        return self.user_data["edge_owner_to_timeline_media"]["count"]
    @property
    def profile_picture_url( self ):
        return self.user_data["profile_pic_url_hd"]
    @property
    def is_verified( self ):
        return self.user_data["is_verified"]
    @property
    def is_private( self ):
        return self.user_data["is_private"]
def test_instagram_user( username : str = "github" ) -> None:
    import os
    if os.environ.get("""CI""" ):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username )
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data , dict )
    assert instagram_user.username == username
    if username != "github":
        return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_5_0
assert instagram_user.number_of_followers > 1_2_0_0_0_0
assert instagram_user.number_of_followings > 1_5
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("""https://instagram.""" )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser('''github''')
print(instagram_user)
print(F"""{instagram_user.number_of_posts = }""")
print(F"""{instagram_user.number_of_followers = }""")
print(F"""{instagram_user.number_of_followings = }""")
print(F"""{instagram_user.email = }""")
print(F"""{instagram_user.website = }""")
print(F"""{instagram_user.profile_picture_url = }""")
print(F"""{instagram_user.is_verified = }""")
print(F"""{instagram_user.is_private = }""")
| 272
| 1
|
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : Dict = logging.get_logger(__name__)
UpperCamelCase__ : Tuple = {
'snap-research/efficientformer-l1-300': (
'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'
),
}
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = '''efficientformer'''
def __init__( self , _lowerCamelCase = [3, 2, 6, 4] , _lowerCamelCase = [48, 96, 224, 448] , _lowerCamelCase = [True, True, True, True] , _lowerCamelCase = 448 , _lowerCamelCase = 32 , _lowerCamelCase = 4 , _lowerCamelCase = 7 , _lowerCamelCase = 5 , _lowerCamelCase = 8 , _lowerCamelCase = 4 , _lowerCamelCase = 0.0 , _lowerCamelCase = 16 , _lowerCamelCase = 3 , _lowerCamelCase = 3 , _lowerCamelCase = 3 , _lowerCamelCase = 2 , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = 1 , _lowerCamelCase = True , _lowerCamelCase = True , _lowerCamelCase = 1e-5 , _lowerCamelCase = "gelu" , _lowerCamelCase = 0.02 , _lowerCamelCase = 1e-12 , _lowerCamelCase = 224 , _lowerCamelCase = 1e-05 , **_lowerCamelCase , ) -> None:
super().__init__(**_lowerCamelCase )
A_ : List[str] = hidden_act
A_ : List[str] = hidden_dropout_prob
A_ : Tuple = hidden_sizes
A_ : str = num_hidden_layers
A_ : List[Any] = num_attention_heads
A_ : Union[str, Any] = initializer_range
A_ : List[str] = layer_norm_eps
A_ : Optional[Any] = patch_size
A_ : Dict = num_channels
A_ : int = depths
A_ : List[Any] = mlp_expansion_ratio
A_ : Dict = downsamples
A_ : Dict = dim
A_ : List[Any] = key_dim
A_ : Union[str, Any] = attention_ratio
A_ : str = resolution
A_ : Optional[Any] = pool_size
A_ : Any = downsample_patch_size
A_ : Union[str, Any] = downsample_stride
A_ : Tuple = downsample_pad
A_ : str = drop_path_rate
A_ : Optional[Any] = num_metaad_blocks
A_ : Union[str, Any] = distillation
A_ : Optional[int] = use_layer_scale
A_ : str = layer_scale_init_value
A_ : str = image_size
A_ : Tuple = batch_norm_eps
| 385
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def UpperCAmelCase ( a_ ) -> Union[str, Any]:
"""simple docstring"""
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
A_ : int = create_tensor(a_ )
A_ : Any = gather(a_ )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
A_ : List[str] = [state.process_index]
A_ : Optional[Any] = gather_object(a_ )
assert len(a_ ) == state.num_processes, F"{gathered_obj}, {len(a_ )} != {state.num_processes}"
assert gathered_obj == list(range(state.num_processes ) ), F"{gathered_obj} != {list(range(state.num_processes ) )}"
def UpperCAmelCase ( a_ ) -> List[str]:
"""simple docstring"""
A_ : List[str] = create_tensor(a_ )
A_ : Optional[Any] = broadcast(a_ )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
if state.is_main_process:
A_ : Optional[int] = torch.arange(state.num_processes + 1 ).to(state.device )
else:
A_ : Any = torch.arange(state.num_processes ).to(state.device )
A_ : Union[str, Any] = pad_across_processes(a_ )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def UpperCAmelCase ( a_ ) -> Optional[int]:
"""simple docstring"""
if state.num_processes != 2:
return
A_ : Tuple = create_tensor(a_ )
A_ : Optional[Any] = reduce(a_ , """sum""" )
A_ : str = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(a_ , a_ ), F"{reduced_tensor} != {truth_tensor}"
def UpperCAmelCase ( a_ ) -> str:
"""simple docstring"""
if state.num_processes != 2:
return
A_ : str = create_tensor(a_ )
A_ : int = reduce(a_ , """mean""" )
A_ : Optional[Any] = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(a_ , a_ ), F"{reduced_tensor} != {truth_tensor}"
def UpperCAmelCase ( a_ ) -> List[Any]:
"""simple docstring"""
main()
def UpperCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
A_ : Union[str, Any] = PartialState()
state.print(F"State: {state}" )
state.print("""testing gather""" )
test_gather(a_ )
state.print("""testing gather_object""" )
test_gather_object(a_ )
state.print("""testing broadcast""" )
test_broadcast(a_ )
state.print("""testing pad_across_processes""" )
test_pad_across_processes(a_ )
state.print("""testing reduce_sum""" )
test_reduce_sum(a_ )
state.print("""testing reduce_mean""" )
test_reduce_mean(a_ )
if __name__ == "__main__":
main()
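# Semantics check for the assertions above, with 2 processes: create_tensor
# yields [1., 2.] on rank 0 and [3., 4.] on rank 1; gather -> [1., 2., 3., 4.]
# on every rank, reduce "sum" -> [4., 6.], and reduce "mean" -> [2., 3.].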
| 385
| 1
|
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _UpperCAmelCase ( _lowercase ):
'''simple docstring'''
def UpperCamelCase ( self : Union[str, Any] , UpperCamelCase__ : Optional[Any] ):
with open(UpperCamelCase__ , encoding='utf-8' ) as input_file:
A = re.compile(R'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)' )
A = input_file.read()
A = regexp.search(UpperCamelCase__ )
return match
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : Any ):
with open(UpperCamelCase__ , encoding='utf-8' ) as input_file:
A = re.compile(R'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()' , re.DOTALL )
A = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
A = regexp.finditer(UpperCamelCase__ )
A = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def UpperCamelCase ( self : Optional[Any] ):
A = Path('./datasets' )
A = list(dataset_paths.absolute().glob('**/*.py' ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(UpperCamelCase__ ) ):
raise AssertionError(f'''open(...) must use utf-8 encoding in {dataset}''' )
def UpperCamelCase ( self : str ):
A = Path('./datasets' )
A = list(dataset_paths.absolute().glob('**/*.py' ) )
for dataset in dataset_files:
if self._no_print_statements(str(UpperCamelCase__ ) ):
raise AssertionError(f'''print statement found in {dataset}. Use datasets.logger/logging instead.''' )
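# Quick stand-alone check of the encoding regex above (a sketch; the example
# strings are illustrative):
_BAD_OPEN = re.compile(R'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)' )
print(bool(_BAD_OPEN.search("with open('f.txt') as f:" ) ) )                    # True  -> flagged
print(bool(_BAD_OPEN.search("with open('f.txt', encoding='utf-8') as f:" ) ) )  # False -> ok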
| 699
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
__lowercase : str = logging.get_logger(__name__)
__lowercase : str = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__lowercase : Any = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
__lowercase : List[str] = {
'''junnyu/roformer_chinese_small''': 1536,
'''junnyu/roformer_chinese_base''': 1536,
'''junnyu/roformer_chinese_char_small''': 512,
'''junnyu/roformer_chinese_char_base''': 512,
'''junnyu/roformer_small_discriminator''': 128,
'''junnyu/roformer_small_generator''': 128,
}
__lowercase : Optional[int] = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[int] = VOCAB_FILES_NAMES
lowerCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : str = PRETRAINED_INIT_CONFIGURATION
lowerCamelCase : Any = RoFormerTokenizer
def __init__(self , A=None , A=None , A=True , A="[UNK]" , A="[SEP]" , A="[PAD]" , A="[CLS]" , A="[MASK]" , A=True , A=None , **A , ):
super().__init__(
A , tokenizer_file=A , do_lower_case=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , tokenize_chinese_chars=A , strip_accents=A , **A , )
lowerCamelCase_ : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get('''lowercase''' , A ) != do_lower_case
or pre_tok_state.get('''strip_accents''' , A ) != strip_accents
):
lowerCamelCase_ : Any = getattr(A , pre_tok_state.pop('''type''' ) )
lowerCamelCase_ : Dict = do_lower_case
lowerCamelCase_ : List[Any] = strip_accents
lowerCamelCase_ : Any = pre_tok_class(**A )
lowerCamelCase_ : str = do_lower_case
def __getstate__(self ):
lowerCamelCase_ : Optional[Any] = self.__dict__.copy()
lowerCamelCase_ : List[Any] = BertPreTokenizer()
return state
def __setstate__(self , A ):
lowerCamelCase_ : str = d
lowerCamelCase_ : List[str] = self.__dict__['''_tokenizer'''].get_vocab()
lowerCamelCase_ : Union[str, Any] = PreTokenizer.custom(JiebaPreTokenizer(A ) )
def UpperCAmelCase__ (self , A , A=None ):
lowerCamelCase_ : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : Optional[int] = [self.sep_token_id]
lowerCamelCase_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : Union[str, Any] = self._tokenizer.model.save(A , name=A )
return tuple(A )
def UpperCAmelCase__ (self , A , A=None , A=None , A=False , **A , ):
lowerCamelCase_ : str = BertPreTokenizer()
return super().save_pretrained(A , A , A , A , **A )
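# Note on the __getstate__/__setstate__ pair above: PreTokenizer.custom(...)
# wraps a Python object and cannot be pickled, so the tokenizer appears to swap
# in a plain BertPreTokenizer before serialization (and around save_pretrained)
# and to restore the Jieba-based pre-tokenizer from the vocab on unpickling.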
| 422
| 0
|
"""simple docstring"""
def reverse_long_words (sentence : str ) -> str:
    return " ".join(
        ''.join(word[::-1] ) if len(word ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
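# Behaviour check: words longer than 4 characters are reversed, shorter ones are
# kept, so reverse_long_words('Hey wollef sroirraw') returns 'Hey fellow warriors'.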
| 327
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__UpperCamelCase = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
__UpperCamelCase = {'''facebook/blenderbot-3B''': 128}
class lowerCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : str = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : int = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE_ : Optional[int] = BlenderbotTokenizer
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__="replace" , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=False , lowerCAmelCase__=True , **lowerCAmelCase__ , ) -> str:
super().__init__(
lowerCAmelCase__ , lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , errors=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ , **lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , lowerCAmelCase__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE = getattr(lowerCAmelCase__ , pre_tok_state.pop('type' ) )
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = pre_tok_class(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = 'post_processor'
SCREAMING_SNAKE_CASE = getattr(self.backend_tokenizer , lowerCAmelCase__ , lowerCAmelCase__ )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE = tuple(state['sep'] )
if "cls" in state:
SCREAMING_SNAKE_CASE = tuple(state['cls'] )
SCREAMING_SNAKE_CASE = False
if state.get('add_prefix_space' , lowerCAmelCase__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = True
if state.get('trim_offsets' , lowerCAmelCase__ ) != trim_offsets:
SCREAMING_SNAKE_CASE = trim_offsets
SCREAMING_SNAKE_CASE = True
if changes_to_apply:
SCREAMING_SNAKE_CASE = getattr(lowerCAmelCase__ , state.pop('type' ) )
SCREAMING_SNAKE_CASE = component_class(**lowerCAmelCase__ )
setattr(self.backend_tokenizer , lowerCAmelCase__ , lowerCAmelCase__ )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __A ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def __A ( self , lowerCAmelCase__ ) -> List[str]:
SCREAMING_SNAKE_CASE = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else value
SCREAMING_SNAKE_CASE = value
def __A ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> BatchEncoding:
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , lowerCAmelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowerCAmelCase__ , **lowerCAmelCase__ )
def __A ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> BatchEncoding:
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , lowerCAmelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowerCAmelCase__ , **lowerCAmelCase__ )
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Dict:
return token_ids_a + [self.eos_token_id]
def __A ( self , lowerCAmelCase__ ) -> List[int]:
SCREAMING_SNAKE_CASE = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(' ' + text )
else:
# Generated responses should contain them already.
inputs.append(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = ' '.join(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = self.encode(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > self.model_max_length:
SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :]
logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids
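# Conversation encoding sketch (following the conversation builder at the end of
# the class above): user turns get a leading space, all turns are joined with
# spaces and encoded, and the token ids are truncated from the left to
# model_max_length (128 for facebook/blenderbot-3B per the table above).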
| 327
| 1
|
import operator as op
def solve( post_fix ):
    stack = []
    div = lambda x , y : int(x / y )  # noqa: E731 integer division operation
    opr = {
        '^': op.pow,
        '*': op.mul,
        '/': div,
        '+': op.add,
        '-': op.sub,
    }  # operators & their respective operation
    # print table header
    print('Symbol'.center(8 ) , 'Action'.center(12 ) , 'Stack' , sep=' | ' )
    print('-' * (30 + len(post_fix )) )
    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x )  # append x to stack
            # output in tabular format
            print(x.rjust(8 ) , ('push(' + x + ')').ljust(12 ) , ','.join(stack ) , sep=' | ' )
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print(''.rjust(8 ) , ('pop(' + b + ')').ljust(12 ) , ','.join(stack ) , sep=' | ' )
            a = stack.pop()  # pop stack
            # output in tabular format
            print(''.rjust(8 ) , ('pop(' + a + ')').ljust(12 ) , ','.join(stack ) , sep=' | ' )
            stack.append(
                str(opr[x](int(a ) , int(b ) ) ) )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8 ) , ('push(' + a + x + b + ')').ljust(12 ) , ','.join(stack ) , sep=' | ' , )
    return int(stack[0] )
if __name__ == "__main__":
    Postfix = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
print('\n\tResult = ', solve(Postfix))
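# Worked run (illustrative): for the postfix expression "5 6 2 + * 12 4 / -",
# the trace pushes 5, 6, 2; '+' -> 8; '*' -> 40; pushes 12, 4; '/' -> 3;
# '-' -> 37, so solve("5 6 2 + * 12 4 / -".split(" ")) returns 37.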
| 31
|
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase__ ( _lowerCamelCase , unittest.TestCase ):
A_ : int = CTRLTokenizer
A_ : List[str] = False
A_ : str = False
def __UpperCamelCase ( self : Any ) -> Tuple:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
A = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
A = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
A = {'unk_token': '<unk>'}
A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__UpperCamelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__UpperCamelCase ) )
def __UpperCamelCase ( self : Optional[Any] , **__UpperCamelCase : List[str] ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase )
def __UpperCamelCase ( self : int , __UpperCamelCase : Optional[int] ) -> Optional[Any]:
A = 'adapt react readapt apt'
A = 'adapt react readapt apt'
return input_text, output_text
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
A = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
A = 'adapt react readapt apt'
A = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
A = tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
A = tokens + [tokenizer.unk_token]
A = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase )
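# BPE behaviour illustrated by the toy vocab above: 'react' splits into the
# characters r e a c t, only the 'r e' merge applies, and non-final pieces get
# the '@@' continuation marker, giving ['re@@', 'a@@', 'c@@', 't'].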
| 106
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__: Dict = logging.get_logger(__name__)
A__: Tuple = {
'''facebook/nllb-moe-54B''': '''https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json''',
}
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : str = "nllb-moe"
__UpperCamelCase : List[Any] = ["past_key_values"]
__UpperCamelCase : Any = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self :Optional[int] , SCREAMING_SNAKE_CASE :Optional[Any]=1_2_8_1_1_2 , SCREAMING_SNAKE_CASE :Optional[Any]=1_0_2_4 , SCREAMING_SNAKE_CASE :List[Any]=1_2 , SCREAMING_SNAKE_CASE :Optional[int]=4_0_9_6 , SCREAMING_SNAKE_CASE :Union[str, Any]=1_6 , SCREAMING_SNAKE_CASE :str=1_2 , SCREAMING_SNAKE_CASE :Dict=4_0_9_6 , SCREAMING_SNAKE_CASE :List[str]=1_6 , SCREAMING_SNAKE_CASE :List[Any]=0.05 , SCREAMING_SNAKE_CASE :Tuple=0.05 , SCREAMING_SNAKE_CASE :Any=True , SCREAMING_SNAKE_CASE :Dict=True , SCREAMING_SNAKE_CASE :int="relu" , SCREAMING_SNAKE_CASE :List[str]=1_0_2_4 , SCREAMING_SNAKE_CASE :Optional[Any]=0.1 , SCREAMING_SNAKE_CASE :Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE :int=0.0 , SCREAMING_SNAKE_CASE :Union[str, Any]=0.02 , SCREAMING_SNAKE_CASE :List[str]=2 , SCREAMING_SNAKE_CASE :Optional[int]=True , SCREAMING_SNAKE_CASE :Any=False , SCREAMING_SNAKE_CASE :Optional[Any]="float32" , SCREAMING_SNAKE_CASE :Optional[Any]=False , SCREAMING_SNAKE_CASE :Optional[int]=1_2_8 , SCREAMING_SNAKE_CASE :Tuple=6_4 , SCREAMING_SNAKE_CASE :Any=4 , SCREAMING_SNAKE_CASE :Dict=4 , SCREAMING_SNAKE_CASE :Optional[Any]=0.001 , SCREAMING_SNAKE_CASE :Dict=0.001 , SCREAMING_SNAKE_CASE :List[Any]="all" , SCREAMING_SNAKE_CASE :List[str]=False , SCREAMING_SNAKE_CASE :Optional[Any]=False , SCREAMING_SNAKE_CASE :Tuple=1.0 , SCREAMING_SNAKE_CASE :str=0.2 , SCREAMING_SNAKE_CASE :List[str]=1 , SCREAMING_SNAKE_CASE :Dict=0 , SCREAMING_SNAKE_CASE :List[str]=2 , SCREAMING_SNAKE_CASE :Any=False , **SCREAMING_SNAKE_CASE :Dict , ) -> Tuple:
'''simple docstring'''
_a : Any =vocab_size
_a : Tuple =max_position_embeddings
_a : int =d_model
_a : List[Any] =encoder_ffn_dim
_a : List[Any] =encoder_layers
_a : Union[str, Any] =encoder_attention_heads
_a : Tuple =decoder_ffn_dim
_a : List[Any] =decoder_layers
_a : List[Any] =decoder_attention_heads
_a : Union[str, Any] =dropout
_a : List[str] =attention_dropout
_a : Optional[int] =activation_dropout
_a : Any =activation_function
_a : List[Any] =init_std
_a : Optional[int] =encoder_layerdrop
_a : Union[str, Any] =decoder_layerdrop
_a : List[str] =use_cache
_a : Tuple =encoder_layers
_a : Optional[int] =scale_embedding # scale factor will be sqrt(d_model) if True
_a : Union[str, Any] =router_z_loss_coef
_a : List[Any] =router_aux_loss_coef
_a : Optional[Any] =decoder_sparse_step
_a : List[Any] =encoder_sparse_step
_a : Any =num_experts
_a : Dict =expert_capacity
_a : Tuple =router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" )
_a : Any =router_dtype
_a : Union[str, Any] =router_ignore_padding_tokens
_a : Dict =batch_prioritized_routing
_a : Tuple =second_expert_policy
_a : Optional[int] =normalize_router_prob_before_dropping
_a : str =moe_eval_capacity_token_fraction
_a : Union[str, Any] =moe_token_dropout
_a : Tuple =output_router_logits
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , is_encoder_decoder=SCREAMING_SNAKE_CASE , decoder_start_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
| 506
|
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : str = (DPMSolverSinglestepScheduler,)
__UpperCamelCase : str = (("num_inference_steps", 25),)
def __UpperCAmelCase ( self :Optional[Any] , **SCREAMING_SNAKE_CASE :int ) -> str:
'''simple docstring'''
_a : Optional[Any] ={
"""num_train_timesteps""": 1_0_0_0,
"""beta_start""": 0.0_001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""solver_order""": 2,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
"""sample_max_value""": 1.0,
"""algorithm_type""": """dpmsolver++""",
"""solver_type""": """midpoint""",
"""lambda_min_clipped""": -float("""inf""" ),
"""variance_type""": None,
}
config.update(**SCREAMING_SNAKE_CASE )
return config
def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :Optional[Any]=0 , **SCREAMING_SNAKE_CASE :str ) -> Any:
'''simple docstring'''
_a : Any =dict(self.forward_default_kwargs )
_a : Any =kwargs.pop("""num_inference_steps""" , SCREAMING_SNAKE_CASE )
_a : Tuple =self.dummy_sample
_a : Optional[Any] =0.1 * sample
_a : Dict =[residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_a : Tuple =self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
_a : Optional[Any] =scheduler_class(**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
_a : str =dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE )
_a : Dict =scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE )
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
_a : List[str] =dummy_past_residuals[: new_scheduler.config.solver_order]
_a , _a : str =sample, sample
for t in range(SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1 ):
_a : Optional[int] =scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
_a : Union[str, Any] =new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __UpperCAmelCase ( self :Optional[Any] ) -> List[str]:
'''simple docstring'''
pass
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :Union[str, Any]=0 , **SCREAMING_SNAKE_CASE :str ) -> Union[str, Any]:
'''simple docstring'''
_a : List[str] =dict(self.forward_default_kwargs )
_a : Dict =kwargs.pop("""num_inference_steps""" , SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =self.dummy_sample
_a : int =0.1 * sample
_a : Optional[int] =[residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_a : Optional[int] =self.get_scheduler_config()
_a : str =scheduler_class(**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals (must be after setting timesteps)
_a : Union[str, Any] =dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residual (must be after setting timesteps)
_a : Any =dummy_past_residuals[: new_scheduler.config.solver_order]
_a : Optional[int] =scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
_a : Tuple =new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :Any=None , **SCREAMING_SNAKE_CASE :List[Any] ) -> Any:
'''simple docstring'''
if scheduler is None:
_a : int =self.scheduler_classes[0]
_a : int =self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
_a : int =scheduler_class(**SCREAMING_SNAKE_CASE )
_a : List[str] =self.scheduler_classes[0]
_a : Union[str, Any] =self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
_a : Optional[Any] =scheduler_class(**SCREAMING_SNAKE_CASE )
_a : List[str] =1_0
_a : Optional[Any] =self.dummy_model()
_a : int =self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
_a : str =model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_a : Dict =scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
return sample
def __UpperCAmelCase ( self :List[Any] ) -> Tuple:
'''simple docstring'''
_a : int =DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
_a : List[Any] =5_0
_a : Optional[Any] =self.dummy_model()
_a : List[Any] =self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
_a : Union[str, Any] =model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_a : Optional[int] =scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
_a : Optional[int] =torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_574 ) < 1e-3
def __UpperCAmelCase ( self :Any ) -> Optional[int]:
'''simple docstring'''
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Tuple ) -> int:
'''simple docstring'''
# make sure that iterating over schedulers with same config names gives same results
# for defaults
_a : List[str] =DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
_a : List[Any] =self.full_loop(scheduler=SCREAMING_SNAKE_CASE )
_a : str =torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_791 ) < 1e-3
_a : Dict =DEISMultistepScheduler.from_config(scheduler.config )
_a : Union[str, Any] =DPMSolverMultistepScheduler.from_config(scheduler.config )
_a : str =UniPCMultistepScheduler.from_config(scheduler.config )
_a : Optional[Any] =DPMSolverSinglestepScheduler.from_config(scheduler.config )
_a : Dict =self.full_loop(scheduler=SCREAMING_SNAKE_CASE )
_a : List[str] =torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_791 ) < 1e-3
def __UpperCAmelCase ( self :Optional[int] ) -> Tuple:
'''simple docstring'''
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , sample_max_value=SCREAMING_SNAKE_CASE , algorithm_type="""dpmsolver++""" , solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , )
def __UpperCAmelCase ( self :Tuple ) -> Optional[int]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Dict ) -> str:
'''simple docstring'''
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , algorithm_type=SCREAMING_SNAKE_CASE , )
_a : List[Any] =self.full_loop(
solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , algorithm_type=SCREAMING_SNAKE_CASE , )
assert not torch.isnan(SCREAMING_SNAKE_CASE ).any(), "Samples have nan numbers"
def __UpperCAmelCase ( self :Tuple ) -> Any:
'''simple docstring'''
self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE )
self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Dict ) -> str:
'''simple docstring'''
self.check_over_configs(lambda_min_clipped=-float("""inf""" ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def __UpperCAmelCase ( self :Dict ) -> Optional[int]:
'''simple docstring'''
self.check_over_configs(variance_type=SCREAMING_SNAKE_CASE )
self.check_over_configs(variance_type="""learned_range""" )
def __UpperCAmelCase ( self :Dict ) -> Optional[int]:
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE , time_step=0 )
def __UpperCAmelCase ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
_a : List[Any] =self.full_loop()
_a : Any =torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_791 ) < 1e-3
def __UpperCAmelCase ( self :List[str] ) -> Tuple:
'''simple docstring'''
_a : Dict =self.full_loop(use_karras_sigmas=SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_248 ) < 1e-3
def __UpperCAmelCase ( self :List[Any] ) -> Any:
'''simple docstring'''
_a : Optional[int] =self.full_loop(prediction_type="""v_prediction""" )
_a : Optional[Any] =torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.1_453 ) < 1e-3
def __UpperCAmelCase ( self :int ) -> str:
'''simple docstring'''
_a : List[Any] =self.full_loop(prediction_type="""v_prediction""" , use_karras_sigmas=SCREAMING_SNAKE_CASE )
_a : Dict =torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.0_649 ) < 1e-3
def __UpperCAmelCase ( self :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
_a : Dict =self.scheduler_classes[0]
_a : str =self.get_scheduler_config(thresholding=SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0 )
_a : Optional[int] =scheduler_class(**SCREAMING_SNAKE_CASE )
_a : Optional[Any] =1_0
_a : Any =self.dummy_model()
_a : int =self.dummy_sample_deter.half()
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
_a : Tuple =model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_a : Dict =scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
assert sample.dtype == torch.floataa
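# --- Illustrative usage sketch (not part of the test suite): the same
# scheduler API the tests above exercise, driven with a zero "model output"
# as a stand-in for a real denoising network.
import torch
from diffusers import DPMSolverSinglestepScheduler

scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1000, solver_order=2)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    noise_pred = torch.zeros_like(sample)  # a real UNet would predict noise here
    sample = scheduler.step(noise_pred, t, sample).prev_sample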
|
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
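# The one-liner above is a quine-style trick: `quine % quine` substitutes the
# string's own repr for %r and collapses %% to %, so the printed text is a
# program of the same shape. A hand check (illustrative):
s = 'print((lambda quine: quine %% quine)(%r))'
print(s % s)  # emits a print(...) statement of exactly the same shape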
|
"""simple docstring"""
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
__UpperCAmelCase =logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowerCAmelCase__ ( UpperCAmelCase_ ):
def __init__( self , *UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ):
'''simple docstring'''
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
A__ = eval_examples
A__ = post_process_function
A__ = quant_trainer_args
A__ = 1_28 # default number of calibration samples
def lowercase_ ( self , UpperCamelCase__=None ):
'''simple docstring'''
if calib_dataset is None and self.calib_dataset is None:
raise ValueError("Trainer: calibration requires an calib_dataset." )
A__ = calib_dataset if calib_dataset is not None else self.calib_dataset
A__ = self._remove_unused_columns(UpperCamelCase__ , description="Calibration" )
return DataLoader(
UpperCamelCase__ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=UpperCamelCase__ , )
def lowercase_ ( self , UpperCamelCase__=None ):
'''simple docstring'''
A__ = self.train_dataset if calib_dataset is None else calib_dataset
A__ = self.get_calib_dataloader(UpperCamelCase__ )
A__ = self.model
quant_trainer.configure_model(UpperCamelCase__ , self.quant_trainer_args , calib=UpperCamelCase__ )
model.eval()
quant_trainer.enable_calibration(UpperCamelCase__ )
logger.info("***** Running calibration *****" )
logger.info(f""" Num examples = {self.calib_num}""" )
logger.info(f""" Batch size = {calib_dataloader.batch_size}""" )
for step, inputs in enumerate(UpperCamelCase__ ):
# Prediction step
A__ , A__ , A__ = self.prediction_step(UpperCamelCase__ , UpperCamelCase__ , prediction_loss_only=UpperCamelCase__ )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(UpperCamelCase__ , self.quant_trainer_args )
A__ = model
def lowercase_ ( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__ = "eval" ):
'''simple docstring'''
A__ = self.eval_dataset if eval_dataset is None else eval_dataset
A__ = self.get_eval_dataloader(UpperCamelCase__ )
A__ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
A__ = self.compute_metrics
A__ = None
A__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
A__ = eval_loop(
UpperCamelCase__ , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase__ , )
finally:
A__ = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
A__ = self.post_process_function(UpperCamelCase__ , UpperCamelCase__ , output.predictions )
A__ = self.compute_metrics(UpperCamelCase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
A__ = metrics.pop(UpperCamelCase__ )
self.log(UpperCamelCase__ )
else:
A__ = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
A__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase__ )
return metrics
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__ = "test" ):
'''simple docstring'''
A__ = self.get_test_dataloader(UpperCamelCase__ )
# Temporarily disable metric computation, we will do it in the loop here.
A__ = self.compute_metrics
A__ = None
A__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
A__ = eval_loop(
UpperCamelCase__ , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase__ , )
finally:
A__ = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
A__ = self.post_process_function(UpperCamelCase__ , UpperCamelCase__ , output.predictions , "predict" )
A__ = self.compute_metrics(UpperCamelCase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
A__ = metrics.pop(UpperCamelCase__ )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase__ )
def lowercase_ ( self , UpperCamelCase__="./" ):
'''simple docstring'''
A__ = self.eval_dataset
A__ = self.get_eval_dataloader(UpperCamelCase__ )
A__ = next(iter(UpperCamelCase__ ) )
# saving device - to make it consistent
A__ = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
# convert to tuple
A__ = tuple(v.to(UpperCamelCase__ ) for k, v in batch.items() )
logger.info("Converting model to be onnx compatible" )
from pytorch_quantization.nn import TensorQuantizer
A__ = True
A__ = self.model.to(UpperCamelCase__ )
model.eval()
model.float()
A__ = model.module if hasattr(UpperCamelCase__ , "module" ) else model
quant_trainer.configure_model(UpperCamelCase__ , self.quant_trainer_args )
A__ = os.path.join(UpperCamelCase__ , "model.onnx" )
logger.info(f"""exporting model to {output_model_file}""" )
A__ = {0: "batch_size", 1: "seq_len"}
torch.onnx.export(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , export_params=UpperCamelCase__ , opset_version=13 , do_constant_folding=UpperCamelCase__ , input_names=["input_ids", "attention_mask", "token_type_ids"] , output_names=["output_start_logits", "output_end_logits"] , dynamic_axes={
"input_ids": axes,
"attention_mask": axes,
"token_type_ids": axes,
"output_start_logits": axes,
"output_end_logits": axes,
} , verbose=UpperCamelCase__ , )
logger.info("onnx export finished" )
|
"""simple docstring"""
from __future__ import annotations
COULOMBS_CONSTANT = 8.988E9  # units = N * m^2 * C^-2
def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """Solve Coulomb's law for whichever one of the four quantities is passed as 0."""
    charge_product = abs(charge1 * charge2)
    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if distance < 0:
        raise ValueError('Distance cannot be negative')
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError('Exactly one argument must be 0')
if __name__ == "__main__":
import doctest
doctest.testmod()
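# --- Worked example (illustrative, my own): two 1 C charges 1 m apart.
# Solving for the zeroed quantity returns it in a one-entry dict.
assert coulombs_law(force=0, charge1=1, charge2=1, distance=1) == {"force": 8.988E9}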
|
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = BarthezTokenizer
a__ = BarthezTokenizerFast
a__ = True
a__ = True
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
super().setUp()
a__: List[Any] = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez')
tokenizer.save_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname , legacy_format=lowercase)
a__: List[str] = tokenizer
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__: str = '<pad>'
a__: Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase) , lowercase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase) , lowercase)
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: str = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(vocab_keys[-1] , '<mask>')
self.assertEqual(len(lowercase) , 10_11_22)
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_11_22)
@require_torch
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__: Optional[int] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
a__: int = [0, 57, 30_18, 7_03_07, 91, 2]
a__: Optional[int] = self.tokenizer(
lowercase , max_length=len(lowercase) , padding=lowercase , truncation=lowercase , return_tensors='pt')
self.assertIsInstance(lowercase , lowercase)
self.assertEqual((2, 6) , batch.input_ids.shape)
self.assertEqual((2, 6) , batch.attention_mask.shape)
a__: Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(lowercase , lowercase)
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
a__: int = self.get_tokenizer()
a__: Union[str, Any] = self.get_rust_tokenizer()
a__: int = 'I was born in 92000, and this is falsé.'
a__: int = tokenizer.tokenize(lowercase)
a__: str = rust_tokenizer.tokenize(lowercase)
self.assertListEqual(lowercase , lowercase)
a__: int = tokenizer.encode(lowercase , add_special_tokens=lowercase)
a__: Dict = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase)
self.assertListEqual(lowercase , lowercase)
a__: Union[str, Any] = self.get_rust_tokenizer()
a__: Optional[Any] = tokenizer.encode(lowercase)
a__: int = rust_tokenizer.encode(lowercase)
self.assertListEqual(lowercase , lowercase)
@slow
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__: Tuple = {'input_ids': [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
a__: int = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=lowercase , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=lowercase , )
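# --- Illustrative usage sketch (not part of the test file): the same
# checkpoint exercised above, downloaded from the Hub.
from transformers import BarthezTokenizerFast

tok = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
batch = tok(["A long paragraph for summarization."], return_tensors="pt")
print(batch.input_ids.shape)  # (1, sequence_length)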
|
'''simple docstring'''
from math import isqrt, log2
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes below max_number using a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]
def solution(base: int = 800800, degree: int = 800800) -> int:
    """Count hybrid-integers p**q * q**p (p < q prime) not exceeding base**degree."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
if __name__ == "__main__":
print(F'{solution() = }')
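# --- Illustrative check on a tiny instance (my own example, not from the
# original file): for base=2, degree=10 the bound is 2**10 = 1024, and the
# only hybrid-integers p**q * q**p below it are 2**3 * 3**2 = 72 and
# 2**5 * 5**2 = 800, so the two-pointer scan should count 2.
assert solution(base=2, degree=10) == 2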
|
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main() -> Union[str, Any]:
__lowerCAmelCase = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" )
__lowerCAmelCase = parser.add_subparsers(help="""diffusers-cli command helpers""" )
# Register commands
EnvironmentCommand.register_subcommand(lowercase )
# Let's go
__lowerCAmelCase = parser.parse_args()
if not hasattr(lowercase , """func""" ):
parser.print_help()
exit(1 )
# Run
__lowerCAmelCase = args.func(lowercase )
service.run()
if __name__ == "__main__":
main()
|
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class A_ :
def __init__( self: Any ,__lowerCAmelCase: int ,__lowerCAmelCase: Optional[int]=2 ,__lowerCAmelCase: Any=True ,__lowerCAmelCase: str=False ,__lowerCAmelCase: Optional[int]=10 ,__lowerCAmelCase: List[str]=3 ,__lowerCAmelCase: Any=32 * 8 ,__lowerCAmelCase: Union[str, Any]=32 * 8 ,__lowerCAmelCase: Optional[Any]=4 ,__lowerCAmelCase: int=64 ,):
'''simple docstring'''
_lowerCamelCase : str = parent
_lowerCamelCase : List[str] = batch_size
_lowerCamelCase : str = is_training
_lowerCamelCase : str = use_auxiliary_loss
_lowerCamelCase : Optional[Any] = num_queries
_lowerCamelCase : Union[str, Any] = num_channels
_lowerCamelCase : List[Any] = min_size
_lowerCamelCase : str = max_size
_lowerCamelCase : str = num_labels
_lowerCamelCase : List[str] = hidden_dim
_lowerCamelCase : Dict = hidden_dim
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__lowerCAmelCase )
_lowerCamelCase : str = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=__lowerCAmelCase )
_lowerCamelCase : str = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=__lowerCAmelCase ) > 0.5
).float()
_lowerCamelCase : Any = (torch.rand((self.batch_size, self.num_labels) ,device=__lowerCAmelCase ) > 0.5).long()
_lowerCamelCase : List[str] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = MaskaFormerConfig(
hidden_size=self.hidden_dim ,)
_lowerCamelCase : Dict = self.num_queries
_lowerCamelCase : Union[str, Any] = self.num_labels
_lowerCamelCase : int = [1, 1, 1, 1]
_lowerCamelCase : List[str] = self.num_channels
_lowerCamelCase : Tuple = 64
_lowerCamelCase : Optional[Any] = 128
_lowerCamelCase : str = self.hidden_dim
_lowerCamelCase : int = self.hidden_dim
_lowerCamelCase : int = self.hidden_dim
return config
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : List[Any] = self.prepare_config_and_inputs()
_lowerCamelCase : Dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : List[Any] = output.encoder_hidden_states
_lowerCamelCase : List[str] = output.pixel_decoder_hidden_states
_lowerCamelCase : Tuple = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__lowerCAmelCase ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__lowerCAmelCase ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__lowerCAmelCase ) ,config.decoder_layers )
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Any ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Any=False ):
'''simple docstring'''
with torch.no_grad():
_lowerCamelCase : List[str] = MaskaFormerModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Optional[int] = model(pixel_values=__lowerCAmelCase ,pixel_mask=__lowerCAmelCase )
_lowerCamelCase : int = model(__lowerCAmelCase ,output_hidden_states=__lowerCAmelCase )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.hidden_dim) ,)
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: int ,__lowerCAmelCase: Any ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : int = MaskaFormerForUniversalSegmentation(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
def comm_check_on_output(__lowerCAmelCase: Optional[int] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,)
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_lowerCamelCase : Optional[Any] = model(pixel_values=__lowerCAmelCase ,pixel_mask=__lowerCAmelCase )
_lowerCamelCase : str = model(__lowerCAmelCase )
comm_check_on_output(__lowerCAmelCase )
_lowerCamelCase : Tuple = model(
pixel_values=__lowerCAmelCase ,pixel_mask=__lowerCAmelCase ,mask_labels=__lowerCAmelCase ,class_labels=__lowerCAmelCase )
comm_check_on_output(__lowerCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) )
@require_torch
class A_ ( _a , _a , unittest.TestCase ):
lowerCAmelCase__ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
lowerCAmelCase__ = {'feature-extraction': MaskaFormerModel} if is_torch_available() else {}
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : List[str] = MaskaFormerModelTester(self )
_lowerCamelCase : int = ConfigTester(self ,config_class=__lowerCAmelCase ,has_text_modality=__lowerCAmelCase )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__lowerCAmelCase ,**__lowerCAmelCase ,output_hidden_states=__lowerCAmelCase )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*__lowerCAmelCase )
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def _lowercase ( self: Dict ):
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def _lowercase ( self: Tuple ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def _lowercase ( self: Any ):
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
pass
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase )
_lowerCamelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Any = [*signature.parameters.keys()]
_lowerCamelCase : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,__lowerCAmelCase )
@slow
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_lowerCamelCase : Tuple = MaskaFormerModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : List[Any] = (self.model_tester.min_size,) * 2
_lowerCamelCase : Optional[Any] = {
"pixel_values": torch.randn((2, 3, *size) ,device=__lowerCAmelCase ),
"mask_labels": torch.randn((2, 10, *size) ,device=__lowerCAmelCase ),
"class_labels": torch.zeros(2 ,10 ,device=__lowerCAmelCase ).long(),
}
_lowerCamelCase : Dict = self.model_tester.get_config()
_lowerCamelCase : Dict = MaskaFormerForUniversalSegmentation(__lowerCAmelCase ).to(__lowerCAmelCase )
_lowerCamelCase : List[Any] = model(**__lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__lowerCAmelCase ,**__lowerCAmelCase ,output_hidden_states=__lowerCAmelCase )
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase ).to(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = model(**__lowerCAmelCase ,output_attentions=__lowerCAmelCase )
self.assertTrue(outputs.attentions is not None )
def _lowercase ( self: int ):
'''simple docstring'''
if not self.model_tester.is_training:
return
_lowerCamelCase : Tuple = self.all_model_classes[1]
_lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
_lowerCamelCase : int = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase ,mask_labels=__lowerCAmelCase ,class_labels=__lowerCAmelCase ).loss
loss.backward()
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : str = self.all_model_classes[1]
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
_lowerCamelCase : Any = True
_lowerCamelCase : Union[str, Any] = True
_lowerCamelCase : Dict = model_class(__lowerCAmelCase ).to(__lowerCAmelCase )
model.train()
_lowerCamelCase : List[Any] = model(__lowerCAmelCase ,mask_labels=__lowerCAmelCase ,class_labels=__lowerCAmelCase )
_lowerCamelCase : Tuple = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_lowerCamelCase : Dict = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_lowerCamelCase : Tuple = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_lowerCamelCase : Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__lowerCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
_lowerCAmelCase : str = 1e-4
def lowerCamelCase_( ) -> List[str]:
'''simple docstring'''
_lowerCamelCase : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class A_ ( unittest.TestCase ):
@cached_property
def _lowercase ( self: str ):
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def _lowercase ( self: List[Any] ):
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(__lowerCAmelCase )
_lowerCamelCase : Any = self.default_image_processor
_lowerCamelCase : str = prepare_img()
_lowerCamelCase : int = image_processor(__lowerCAmelCase ,return_tensors="pt" ).to(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__lowerCAmelCase ,(1, 3, 384, 384) )
with torch.no_grad():
_lowerCamelCase : Union[str, Any] = model(**__lowerCAmelCase )
_lowerCamelCase : Dict = torch.tensor(
[[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] ,__lowerCAmelCase ,atol=__lowerCAmelCase ) )
_lowerCamelCase : int = torch.tensor(
[[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,__lowerCAmelCase ,atol=__lowerCAmelCase ) )
_lowerCamelCase : List[Any] = torch.tensor(
[[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,__lowerCAmelCase ,atol=__lowerCAmelCase ) )
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : List[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__lowerCAmelCase ).eval()
_lowerCamelCase : List[str] = self.default_image_processor
_lowerCamelCase : int = prepare_img()
_lowerCamelCase : int = image_processor(__lowerCAmelCase ,return_tensors="pt" ).to(__lowerCAmelCase )
_lowerCamelCase : List[str] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__lowerCAmelCase ,(1, 3, 384, 384) )
with torch.no_grad():
_lowerCamelCase : Dict = model(**__lowerCAmelCase )
# masks_queries_logits
_lowerCamelCase : Optional[int] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
_lowerCamelCase : str = [
[-8.78_39, -9.00_56, -8.81_21],
[-7.41_04, -7.03_13, -6.54_01],
[-6.61_05, -6.34_27, -6.46_75],
]
_lowerCamelCase : str = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,__lowerCAmelCase ,atol=__lowerCAmelCase ) )
# class_queries_logits
_lowerCamelCase : Optional[Any] = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape ,(1, model.config.num_queries, model.config.num_labels + 1) )
_lowerCamelCase : Optional[int] = torch.tensor(
[
[1.83_24, -8.08_35, -4.19_22],
[0.84_50, -9.00_50, -3.60_53],
[0.30_45, -7.72_93, -3.02_75],
] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,__lowerCAmelCase ,atol=__lowerCAmelCase ) )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Tuple = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__lowerCAmelCase ).eval()
_lowerCamelCase : Any = self.default_image_processor
_lowerCamelCase : Dict = image_processor(
[np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors="pt" ,)
_lowerCamelCase : List[str] = inputs["pixel_values"].to(__lowerCAmelCase )
_lowerCamelCase : List[str] = [el.to(__lowerCAmelCase ) for el in inputs["mask_labels"]]
_lowerCamelCase : Dict = [el.to(__lowerCAmelCase ) for el in inputs["class_labels"]]
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(**__lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
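# --- Minimal inference sketch (illustrative, not a test): the same small
# checkpoint and COCO fixture used above; assumes network access to download
# the weights.
import torch
from PIL import Image
from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerImageProcessor

ckpt = "facebook/mask2former-swin-small-coco-instance"
processor = Mask2FormerImageProcessor.from_pretrained(ckpt)
model = Mask2FormerForUniversalSegmentation.from_pretrained(ckpt).eval()
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with torch.no_grad():
    outputs = model(**processor(image, return_tensors="pt"))
print(outputs.class_queries_logits.shape)  # (1, num_queries, num_labels + 1)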
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class A_ ( _a ):
lowerCAmelCase__ = 42
class A_ ( _a , _a ):
@register_to_config
def __init__( self: List[Any] ,__lowerCAmelCase: int = 65_536 ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: int = 2 ,__lowerCAmelCase: int = 2 ,__lowerCAmelCase: int = 0 ,__lowerCAmelCase: str = "fourier" ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: float = 0.0 ,__lowerCAmelCase: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") ,__lowerCAmelCase: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") ,__lowerCAmelCase: Tuple[str] = "UNetMidBlock1D" ,__lowerCAmelCase: str = None ,__lowerCAmelCase: Tuple[int] = (32, 32, 64) ,__lowerCAmelCase: str = None ,__lowerCAmelCase: int = 8 ,__lowerCAmelCase: int = 1 ,__lowerCAmelCase: bool = False ,):
'''simple docstring'''
super().__init__()
_lowerCamelCase : List[str] = sample_size
# time
if time_embedding_type == "fourier":
_lowerCamelCase : Optional[Any] = GaussianFourierProjection(
embedding_size=8 ,set_W_to_weight=__lowerCAmelCase ,log=__lowerCAmelCase ,flip_sin_to_cos=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
_lowerCamelCase : Any = Timesteps(
block_out_channels[0] ,flip_sin_to_cos=__lowerCAmelCase ,downscale_freq_shift=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = block_out_channels[0]
if use_timestep_embedding:
_lowerCamelCase : str = block_out_channels[0] * 4
_lowerCamelCase : str = TimestepEmbedding(
in_channels=__lowerCAmelCase ,time_embed_dim=__lowerCAmelCase ,act_fn=__lowerCAmelCase ,out_dim=block_out_channels[0] ,)
_lowerCamelCase : int = nn.ModuleList([] )
_lowerCamelCase : Tuple = None
_lowerCamelCase : Tuple = nn.ModuleList([] )
_lowerCamelCase : List[str] = None
# down
_lowerCamelCase : List[Any] = in_channels
for i, down_block_type in enumerate(__lowerCAmelCase ):
_lowerCamelCase : Optional[Any] = output_channel
_lowerCamelCase : List[str] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
_lowerCamelCase : Tuple = i == len(__lowerCAmelCase ) - 1
_lowerCamelCase : List[Any] = get_down_block(
__lowerCAmelCase ,num_layers=__lowerCAmelCase ,in_channels=__lowerCAmelCase ,out_channels=__lowerCAmelCase ,temb_channels=block_out_channels[0] ,add_downsample=not is_final_block or downsample_each_block ,)
self.down_blocks.append(__lowerCAmelCase )
# mid
_lowerCamelCase : Optional[Any] = get_mid_block(
__lowerCAmelCase ,in_channels=block_out_channels[-1] ,mid_channels=block_out_channels[-1] ,out_channels=block_out_channels[-1] ,embed_dim=block_out_channels[0] ,num_layers=__lowerCAmelCase ,add_downsample=__lowerCAmelCase ,)
# up
_lowerCamelCase : Optional[int] = list(reversed(__lowerCAmelCase ) )
_lowerCamelCase : Tuple = reversed_block_out_channels[0]
if out_block_type is None:
_lowerCamelCase : Tuple = out_channels
else:
_lowerCamelCase : Optional[Any] = block_out_channels[0]
for i, up_block_type in enumerate(__lowerCAmelCase ):
_lowerCamelCase : List[Any] = output_channel
_lowerCamelCase : List[str] = (
reversed_block_out_channels[i + 1] if i < len(__lowerCAmelCase ) - 1 else final_upsample_channels
)
_lowerCamelCase : Union[str, Any] = i == len(__lowerCAmelCase ) - 1
_lowerCamelCase : Tuple = get_up_block(
__lowerCAmelCase ,num_layers=__lowerCAmelCase ,in_channels=__lowerCAmelCase ,out_channels=__lowerCAmelCase ,temb_channels=block_out_channels[0] ,add_upsample=not is_final_block ,)
self.up_blocks.append(__lowerCAmelCase )
_lowerCamelCase : Dict = output_channel
# out
_lowerCamelCase : Dict = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 ,32 )
_lowerCamelCase : List[Any] = get_out_block(
out_block_type=__lowerCAmelCase ,num_groups_out=__lowerCAmelCase ,embed_dim=block_out_channels[0] ,out_channels=__lowerCAmelCase ,act_fn=__lowerCAmelCase ,fc_dim=block_out_channels[-1] // 4 ,)
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Union[torch.Tensor, float, int] ,__lowerCAmelCase: bool = True ,):
'''simple docstring'''
_lowerCamelCase : Dict = timestep
if not torch.is_tensor(__lowerCAmelCase ):
_lowerCamelCase : int = torch.tensor([timesteps] ,dtype=torch.long ,device=sample.device )
elif torch.is_tensor(__lowerCAmelCase ) and len(timesteps.shape ) == 0:
_lowerCamelCase : Optional[Any] = timesteps[None].to(sample.device )
_lowerCamelCase : Dict = self.time_proj(__lowerCAmelCase )
if self.config.use_timestep_embedding:
_lowerCamelCase : Any = self.time_mlp(__lowerCAmelCase )
else:
_lowerCamelCase : Optional[int] = timestep_embed[..., None]
_lowerCamelCase : int = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
_lowerCamelCase : Any = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
_lowerCamelCase : Any = ()
for downsample_block in self.down_blocks:
_lowerCamelCase, _lowerCamelCase : Dict = downsample_block(hidden_states=__lowerCAmelCase ,temb=__lowerCAmelCase )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
_lowerCamelCase : Union[str, Any] = self.mid_block(__lowerCAmelCase ,__lowerCAmelCase )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
_lowerCamelCase : Any = down_block_res_samples[-1:]
_lowerCamelCase : Tuple = down_block_res_samples[:-1]
_lowerCamelCase : str = upsample_block(__lowerCAmelCase ,res_hidden_states_tuple=__lowerCAmelCase ,temb=__lowerCAmelCase )
# 5. post-process
if self.out_block:
_lowerCamelCase : List[str] = self.out_block(__lowerCAmelCase ,__lowerCAmelCase )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=__lowerCAmelCase )
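# --- Illustrative forward-pass sketch (assumptions: the public diffusers
# UNet1DModel with its default dance-diffusion-style blocks; extra_in_channels
# is set to 16 here to match the width of the default Fourier time embedding
# that DownBlock1DNoSkip concatenates onto the input channels).
import torch
from diffusers import UNet1DModel

unet = UNet1DModel(sample_size=2048, extra_in_channels=16)
sample = torch.randn(1, 2, 2048)  # (batch, channels, length)
out = unet(sample, timestep=10).sample
print(out.shape)  # expected: torch.Size([1, 2, 2048])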
|
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
_a = WebClient(token=os.environ["""CI_SLACK_BOT_TOKEN"""])
def lowerCamelCase__ ( __snake_case ) -> Any:
"""simple docstring"""
_UpperCamelCase = test_results.split(''' ''' )
_UpperCamelCase = 0
_UpperCamelCase = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
_UpperCamelCase = expressions[-2] if '''=''' in expressions[-1] else expressions[-1]
for i, expression in enumerate(__snake_case ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def lowerCamelCase__ ( __snake_case ) -> int:
"""simple docstring"""
_UpperCamelCase = {}
_UpperCamelCase = None
_UpperCamelCase = False
for line in failures_short_lines.split('''\n''' ):
if re.search(r'''_ \[doctest\]''', __snake_case ):
_UpperCamelCase = True
_UpperCamelCase = line.split(''' ''' )[2]
elif in_error and not line.split(''' ''' )[0].isdigit():
_UpperCamelCase = line
_UpperCamelCase = False
return failures
class _UpperCAmelCase:
def __init__( self , __a , __a) -> Dict:
'''simple docstring'''
_UpperCamelCase = title
_UpperCamelCase = doc_test_results['''time_spent'''].split(''',''')[0]
_UpperCamelCase = doc_test_results['''success''']
_UpperCamelCase = doc_test_results['''failures''']
_UpperCamelCase = self.n_success + self.n_failures
# Failures and success of the modeling tests
_UpperCamelCase = doc_test_results
@property
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase = [self._time_spent]
_UpperCamelCase = 0
for time in time_spent:
_UpperCamelCase = time.split(''':''')
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(__a) == 1:
_UpperCamelCase = [0, 0, time_parts[0]]
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
total_secs += hours * 36_00 + minutes * 60 + seconds
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return F'''{int(__a)}h{int(__a)}m{int(__a)}s'''
@property
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
@property
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
F''' {self.time}.'''
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
@property
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = 40
_UpperCamelCase = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(__a , __a)}
_UpperCamelCase = ''''''
for category, failures in category_failures.items():
if len(__a) == 0:
continue
if report != "":
report += "\n\n"
report += F'''*{category} failures*:'''.ljust(line_length // 2).rjust(line_length // 2) + "\n"
report += "`"
report += "`\n`".join(__a)
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'''The following examples had failures:\n\n\n{report}\n''',
},
}
@property
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase = [self.header]
if self.n_failures > 0:
blocks.append(self.failures)
if self.n_failures > 0:
blocks.extend([self.category_failures])
if self.n_failures == 0:
blocks.append(self.no_failures)
return json.dumps(__a)
@staticmethod
def UpperCAmelCase ( ) -> str:
'''simple docstring'''
_UpperCamelCase = [
{
'''type''': '''section''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''There was an issue running the tests.''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
'''url''': F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
]
print('''Sending the following payload''')
print(json.dumps({'''blocks''': json.loads(__a)}))
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text='''There was an issue running the tests.''' , blocks=__a , )
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
print('''Sending the following payload''')
print(json.dumps({'''blocks''': json.loads(self.payload)}))
_UpperCamelCase = F'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else '''All tests passed.'''
_UpperCamelCase = client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , blocks=self.payload , text=__a , )
def UpperCAmelCase ( self , __a , __a , __a , __a) -> Tuple:
'''simple docstring'''
_UpperCamelCase = ''''''
for key, value in failures.items():
_UpperCamelCase = value[:2_00] + ''' [Truncated]''' if len(__a) > 2_50 else value
failures_text += F'''*{key}*\n_{value}_\n\n'''
_UpperCamelCase = job_name
_UpperCamelCase = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}}
if job_link is not None:
_UpperCamelCase = {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True},
'''url''': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
if self.thread_ts is None:
raise ValueError('''Can only post reply if a post has been made.''')
_UpperCamelCase = self.doc_test_results.pop('''job_link''')
self.doc_test_results.pop('''failures''')
self.doc_test_results.pop('''success''')
self.doc_test_results.pop('''time_spent''')
_UpperCamelCase = sorted(self.doc_test_results.items() , key=lambda __a: __a[0])
for job, job_result in sorted_dict:
if len(job_result['''failures''']):
_UpperCamelCase = F'''*Num failures* :{len(job_result["failed"])} \n'''
_UpperCamelCase = job_result['''failures''']
_UpperCamelCase = self.get_reply_blocks(__a , __a , __a , text=__a)
print('''Sending the following reply''')
print(json.dumps({'''blocks''': blocks}))
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text=F'''Results for {job}''' , blocks=__a , thread_ts=self.thread_ts['''ts'''] , )
time.sleep(1)
def lowerCamelCase__ ( ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = os.environ['''GITHUB_RUN_ID''']
_UpperCamelCase = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'''
_UpperCamelCase = requests.get(__snake_case ).json()
_UpperCamelCase = {}
try:
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
_UpperCamelCase = math.ceil((result['''total_count'''] - 1_00) / 1_00 )
for i in range(__snake_case ):
_UpperCamelCase = requests.get(url + F'''&page={i + 2}''' ).json()
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
return jobs
except Exception as e:
print('''Unknown error, could not fetch links.''', __snake_case )
return {}
def lowerCamelCase__ ( __snake_case ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = {}
if os.path.exists(__snake_case ):
_UpperCamelCase = os.listdir(__snake_case )
for file in files:
try:
with open(os.path.join(__snake_case, __snake_case ), encoding='''utf-8''' ) as f:
_UpperCamelCase = f.read()
except UnicodeDecodeError as e:
raise ValueError(F'''Could not open {os.path.join(__snake_case, __snake_case )}.''' ) from e
return _artifact
def lowerCamelCase__ ( ) -> int:
"""simple docstring"""
class _UpperCAmelCase:
def __init__( self , __a) -> str:
'''simple docstring'''
_UpperCamelCase = name
_UpperCamelCase = []
def __str__( self) -> int:
'''simple docstring'''
return self.name
def UpperCAmelCase ( self , __a) -> Any:
'''simple docstring'''
self.paths.append({'''name''': self.name, '''path''': path})
_UpperCamelCase = {}
_UpperCamelCase = filter(os.path.isdir, os.listdir() )
for directory in directories:
_UpperCamelCase = directory
if artifact_name not in _available_artifacts:
_UpperCamelCase = Artifact(__snake_case )
_available_artifacts[artifact_name].add_path(__snake_case )
return _available_artifacts
if __name__ == "__main__":
_a = get_job_links()
_a = retrieve_available_artifacts()
_a = collections.OrderedDict(
[
("""*.py""", """API Examples"""),
("""*.md""", """MD Examples"""),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
_a = {
v: {
"""failed""": [],
"""failures""": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
_a = github_actions_job_links.get("""run_doctests""")
_a = available_artifacts["""doc_tests_gpu_test_reports"""].paths[0]
_a = retrieve_artifact(artifact_path["""name"""])
if "stats" in artifact:
_a , _a , _a = handle_test_results(artifact["""stats"""])
_a = failed
_a = success
_a = time_spent[1:-1] + """, """
_a = extract_first_line_failure(artifact["""failures_short"""])
for line in artifact["summary_short"].split("""\n"""):
if re.search("""FAILED""", line):
_a = line.replace("""FAILED """, """""")
_a = line.split()[0].replace("""\n""", """""")
if "::" in line:
_a , _a = line.split("""::""")
else:
_a , _a = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
_a = docs[file_regex]
doc_test_results[category]["failed"].append(test)
_a = all_failures[test] if test in all_failures else """N/A"""
_a = failure
break
_a = Message("""🤗 Results of the doc tests.""", doc_test_results)
message.post()
message.post_reply()
|
"""simple docstring"""
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Products of every multiset of primes summing to number_to_partition."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}
    ret: set[int] = set()
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)
    return ret
def solution(number_unique_partitions: int = 5000) -> int | None:
    """Smallest integer whose prime partitions yield more than number_unique_partitions products."""
    for number_to_partition in range(1, number_unique_partitions):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(F"""{solution() = }""")
|
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """6k +/- 1 primality test."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def list_truncated_nums(n: int) -> list[int]:
    """Return n together with all of its left- and right-truncations."""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums
def validate(n: int) -> bool:
    """Cheap filter: for numbers above 3 digits, the first and last three digits must be prime."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True
def compute_truncated_primes(count: int = 11) -> list[int]:
    """Collect the first `count` primes that stay prime under repeated truncation."""
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes
def solution() -> int:
    """Sum of the only eleven truncatable primes (Project Euler 37)."""
    return sum(compute_truncated_primes(11))
if __name__ == "__main__":
    print(f"{sum(compute_truncated_primes(11)) = }")
| 561
|
def is_palindrome(head) -> bool:
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # terminate the first half; the check works without this, but it keeps the halves separate
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # the second part has the same number of nodes or one fewer
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head) -> bool:
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True


def is_palindrome_dict(head) -> bool:
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
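A minimal, self-contained sketch to exercise the three checkers above; the ListNode class is assumed (as in the LeetCode setting), so this definition is illustrative rather than part of the original snippet.

class ListNode:
    # Minimal singly linked list node assumed by the checkers above.
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next


def build_list(values):
    head = None
    for v in reversed(values):
        head = ListNode(v, head)
    return head


assert is_palindrome_dict(build_list([1, 2, 2, 1])) is True
assert is_palindrome_stack(build_list([1, 2, 3])) is False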
| 561
| 1
|
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
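A hedged usage sketch of the pipeline above; the checkpoint id below is an assumption for illustration (any score-SDE VE checkpoint of this family should work), not something this file prescribes.

# Hypothetical usage; the checkpoint id is an assumption, not confirmed by this file.
from diffusers import ScoreSdeVePipeline

pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
image = pipe(num_inference_steps=10).images[0]
image.save("sde_ve_sample.png")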
| 0
|
speed_chart: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609344,
    "knot": 1.852,
}

speed_chart_inverse: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 0.277777778,
    "mph": 0.621371192,
    "knot": 0.539956803,
}


def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
if __name__ == "__main__":
import doctest
doctest.testmod()
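A worked example of the two-chart conversion above: the speed is first scaled into km/h via speed_chart, then out of km/h via speed_chart_inverse.

# 100 km/h -> mph: 100 * 1.0 (into km/h) * 0.621371192 (out of km/h) = 62.137
print(convert_speed(100, "km/h", "mph"))  # 62.137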
| 304
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A_ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = KandinskyImgaImgPipeline
__snake_case = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""]
__snake_case = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
__snake_case = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
__snake_case = False
@property
def _snake_case ( self: List[Any] ):
return 32
@property
def _snake_case ( self: Any ):
return 32
@property
def _snake_case ( self: Dict ):
return self.time_input_dim
@property
def _snake_case ( self: Optional[Any] ):
return self.time_input_dim * 4
@property
def _snake_case ( self: Any ):
return 100
@property
def _snake_case ( self: Tuple ):
__lowerCamelCase : Dict = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def _snake_case ( self: Dict ):
torch.manual_seed(0 )
__lowerCamelCase : List[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
__lowerCamelCase : Any = MultilingualCLIP(a )
__lowerCamelCase : Dict = text_encoder.eval()
return text_encoder
@property
def _snake_case ( self: List[Any] ):
torch.manual_seed(0 )
__lowerCamelCase : int = {
'in_channels': 4,
# Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
__lowerCamelCase : Optional[int] = UNetaDConditionModel(**a )
return model
@property
def _snake_case ( self: Dict ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _snake_case ( self: List[str] ):
torch.manual_seed(0 )
__lowerCamelCase : int = VQModel(**self.dummy_movq_kwargs )
return model
def _snake_case ( self: int ):
__lowerCamelCase : List[str] = self.dummy_text_encoder
__lowerCamelCase : str = self.dummy_tokenizer
__lowerCamelCase : List[Any] = self.dummy_unet
__lowerCamelCase : List[str] = self.dummy_movq
__lowerCamelCase : Any = {
'num_train_timesteps': 1000,
'beta_schedule': 'linear',
'beta_start': 0.0_0_0_8_5,
'beta_end': 0.0_1_2,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
__lowerCamelCase : List[Any] = DDIMScheduler(**a )
__lowerCamelCase : Optional[Any] = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def _snake_case ( self: Optional[int] , a: List[Any] , a: List[str]=0 ):
__lowerCamelCase : List[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(a ) ).to(a )
__lowerCamelCase : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(a )
# create init_image
__lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(a ) ).to(a )
__lowerCamelCase : str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCamelCase : Optional[int] = Image.fromarray(np.uinta(a ) ).convert('RGB' ).resize((256, 256) )
if str(a ).startswith('mps' ):
__lowerCamelCase : Tuple = torch.manual_seed(a )
else:
__lowerCamelCase : List[str] = torch.Generator(device=a ).manual_seed(a )
__lowerCamelCase : Tuple = {
'prompt': 'horse',
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def _snake_case ( self: Dict ):
__lowerCamelCase : Optional[Any] = 'cpu'
__lowerCamelCase : List[str] = self.get_dummy_components()
__lowerCamelCase : Optional[int] = self.pipeline_class(**a )
__lowerCamelCase : Tuple = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : Tuple = pipe(**self.get_dummy_inputs(a ) )
__lowerCamelCase : Optional[Any] = output.images
__lowerCamelCase : int = pipe(
**self.get_dummy_inputs(a ) , return_dict=a , )[0]
__lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
__lowerCamelCase : List[str] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase : Dict = np.array(
[0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self: Optional[int] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self: List[Any] ):
__lowerCamelCase : Union[str, Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_img2img_frog.npy' )
__lowerCamelCase : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
__lowerCamelCase : Optional[Any] = 'A red cartoon frog, 4k'
__lowerCamelCase : Optional[Any] = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa )
pipe_prior.to(a )
__lowerCamelCase : str = KandinskyImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1' , torch_dtype=torch.floataa )
__lowerCamelCase : Optional[int] = pipeline.to(a )
pipeline.set_progress_bar_config(disable=a )
__lowerCamelCase : Any = torch.Generator(device='cpu' ).manual_seed(0 )
__lowerCamelCase , __lowerCamelCase : List[str] = pipe_prior(
a , generator=a , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
__lowerCamelCase : int = pipeline(
a , image=a , image_embeds=a , negative_image_embeds=a , generator=a , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='np' , )
__lowerCamelCase : Any = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(a , a )
| 230
|
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class A_ :
'''simple docstring'''
__snake_case = 42 # [batch_size x 3]
__snake_case = 42 # [batch_size x 3]
__snake_case = 42 # [batch_size x 3]
__snake_case = 42 # [batch_size x 3]
__snake_case = 42
__snake_case = 42
__snake_case = 42
__snake_case = 42
__snake_case = 42
def _snake_case ( self: str ):
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def _snake_case ( self: Dict ):
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def _snake_case ( self: List[str] ):
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def _snake_case ( self: Dict ):
__lowerCamelCase : Any = torch.arange(self.height * self.width )
__lowerCamelCase : List[str] = torch.stack(
[
pixel_indices % self.width,
torch.div(a , self.width , rounding_mode='trunc' ),
] , axis=1 , )
return coords
@property
def _snake_case ( self: Optional[int] ):
__lowerCamelCase , *__lowerCamelCase : int = self.shape
__lowerCamelCase : Optional[Any] = int(np.prod(a ) )
__lowerCamelCase : Dict = self.get_image_coords()
__lowerCamelCase : Optional[Any] = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
__lowerCamelCase : Tuple = self.get_camera_rays(a )
__lowerCamelCase : Union[str, Any] = rays.view(a , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def _snake_case ( self: Optional[Any] , a: torch.Tensor ):
__lowerCamelCase , *__lowerCamelCase , __lowerCamelCase : Union[str, Any] = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
__lowerCamelCase : Union[str, Any] = coords.view(a , -1 , 2 )
__lowerCamelCase : Dict = self.resolution()
__lowerCamelCase : List[Any] = self.fov()
__lowerCamelCase : str = (flat.float() / (res - 1)) * 2 - 1
__lowerCamelCase : Union[str, Any] = fracs * torch.tan(fov / 2 )
__lowerCamelCase : Dict = fracs.view(a , -1 , 2 )
__lowerCamelCase : Dict = (
self.z.view(a , 1 , 3 )
+ self.x.view(a , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(a , 1 , 3 ) * fracs[:, :, 1:]
)
__lowerCamelCase : int = directions / directions.norm(dim=-1 , keepdim=a )
__lowerCamelCase : Any = torch.stack(
[
torch.broadcast_to(self.origin.view(a , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(a , *a , 2 , 3 )
def _snake_case ( self: int , a: int , a: int ):
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=a , height=a , x_fov=self.x_fov , y_fov=self.y_fov , )
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : Dict = []
__lowerCamelCase : Optional[int] = []
__lowerCamelCase : str = []
__lowerCamelCase : Optional[int] = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
__lowerCamelCase : Tuple = np.array([np.sin(SCREAMING_SNAKE_CASE__ ), np.cos(SCREAMING_SNAKE_CASE__ ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
__lowerCamelCase : Optional[Any] = -z * 4
__lowerCamelCase : Any = np.array([np.cos(SCREAMING_SNAKE_CASE__ ), -np.sin(SCREAMING_SNAKE_CASE__ ), 0.0] )
__lowerCamelCase : Optional[int] = np.cross(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
origins.append(SCREAMING_SNAKE_CASE__ )
xs.append(SCREAMING_SNAKE_CASE__ )
ys.append(SCREAMING_SNAKE_CASE__ )
zs.append(SCREAMING_SNAKE_CASE__ )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(SCREAMING_SNAKE_CASE__ , axis=0 ) ).float() , x=torch.from_numpy(np.stack(SCREAMING_SNAKE_CASE__ , axis=0 ) ).float() , y=torch.from_numpy(np.stack(SCREAMING_SNAKE_CASE__ , axis=0 ) ).float() , z=torch.from_numpy(np.stack(SCREAMING_SNAKE_CASE__ , axis=0 ) ).float() , width=SCREAMING_SNAKE_CASE__ , height=SCREAMING_SNAKE_CASE__ , x_fov=0.7 , y_fov=0.7 , shape=(1, len(SCREAMING_SNAKE_CASE__ )) , )
| 230
| 1
|
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class A ( SCREAMING_SNAKE_CASE__ ):
def __init__( self : Optional[int] , __magic_name__ : int , __magic_name__ : List[str] , __magic_name__ : int=1024 , __magic_name__ : Union[str, Any]=1024 , __magic_name__ : Union[str, Any]=3.6 ):
"""simple docstring"""
lowerCAmelCase__ = tokenizer
lowerCAmelCase__ = tokenizer.bos_token_id
lowerCAmelCase__ = dataset
lowerCAmelCase__ = seq_length
lowerCAmelCase__ = seq_length * chars_per_token * num_of_sequences
def __iter__( self : int ):
"""simple docstring"""
lowerCAmelCase__ = iter(self.dataset )
lowerCAmelCase__ = True
while more_examples:
lowerCAmelCase__ ,lowerCAmelCase__ = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(__magic_name__ )["content"] )
buffer_len += len(buffer[-1] )
except StopIteration:
lowerCAmelCase__ = False
break
lowerCAmelCase__ = tokenizer(__magic_name__ , truncation=__magic_name__ )["input_ids"]
lowerCAmelCase__ = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(__magic_name__ ) , self.seq_length ):
lowerCAmelCase__ = all_token_ids[i : i + self.seq_length]
if len(__magic_name__ ) == self.seq_length:
yield torch.tensor(__magic_name__ )
def A ( UpperCamelCase_ : str ) -> int:
'''simple docstring'''
lowerCAmelCase__ = {"streaming": True}
lowerCAmelCase__ = load_dataset(args.dataset_name , split="train" , **UpperCamelCase_ )
lowerCAmelCase__ = ConstantLengthDataset(UpperCamelCase_ , UpperCamelCase_ , seq_length=args.seq_length )
lowerCAmelCase__ = DataLoader(UpperCamelCase_ , batch_size=args.batch_size )
return eval_dataloader
def A ( UpperCamelCase_ : Tuple ) -> str:
'''simple docstring'''
model.eval()
lowerCAmelCase__ = []
for step, batch in enumerate(UpperCamelCase_ ):
with torch.no_grad():
lowerCAmelCase__ = model(UpperCamelCase_ , labels=UpperCamelCase_ )
lowerCAmelCase__ = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(UpperCamelCase_ ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
lowerCAmelCase__ = torch.mean(torch.cat(UpperCamelCase_ ) )
try:
lowerCAmelCase__ = torch.exp(UpperCamelCase_ )
except OverflowError:
lowerCAmelCase__ = float("inf" )
return loss.item(), perplexity.item()
# Setup Accelerator
UpperCAmelCase__ : Any = Accelerator()
# Parse configuration
UpperCAmelCase__ : Tuple = HfArgumentParser(EvaluationArguments)
UpperCAmelCase__ : int = parser.parse_args()
set_seed(args.seed)
# Logging
UpperCAmelCase__ : Any = logging.getLogger(__name__)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
# Load model and tokenizer
UpperCAmelCase__ : Any = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
UpperCAmelCase__ : Tuple = create_dataloader(args)
# Prepare everything with our `accelerator`.
UpperCAmelCase__ , UpperCAmelCase__ : str = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = evaluate(args)
logger.info(F"loss/eval: {eval_loss}, perplexity: {perplexity}")
| 48
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class SCREAMING_SNAKE_CASE__ ( snake_case_ ):
"""simple docstring"""
A__ : torch.FloatTensor
class SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_ ):
"""simple docstring"""
@register_to_config
def __init__( self , A = 16 , A = 88 , A = None , A = None , A = 1 , A = 0.0 , A = 32 , A = None , A = False , A = None , A = "geglu" , A = True , A = True , ) -> Union[str, Any]:
super().__init__()
A: Union[str, Any] = num_attention_heads
A: Optional[Any] = attention_head_dim
A: Optional[int] = num_attention_heads * attention_head_dim
A: str = in_channels
A: List[Any] = torch.nn.GroupNorm(num_groups=A , num_channels=A , eps=1e-6 , affine=A )
A: Optional[int] = nn.Linear(A , A )
# 3. Define transformers blocks
A: Optional[Any] = nn.ModuleList(
[
BasicTransformerBlock(
A , A , A , dropout=A , cross_attention_dim=A , activation_fn=A , attention_bias=A , double_self_attention=A , norm_elementwise_affine=A , )
for d in range(A )
] )
A: Tuple = nn.Linear(A , A )
def a__ ( self , A , A=None , A=None , A=None , A=1 , A=None , A = True , ) -> str:
A , A , A , A: Optional[Any] = hidden_states.shape
A: Optional[Any] = batch_frames // num_frames
A: List[str] = hidden_states
A: List[str] = hidden_states[None, :].reshape(A , A , A , A , A )
A: Dict = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
A: List[str] = self.norm(A )
A: List[Any] = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , A , A )
A: Optional[Any] = self.proj_in(A )
# 2. Blocks
for block in self.transformer_blocks:
A: int = block(
A , encoder_hidden_states=A , timestep=A , cross_attention_kwargs=A , class_labels=A , )
# 3. Output
A: Tuple = self.proj_out(A )
A: List[str] = (
hidden_states[None, None, :]
.reshape(A , A , A , A , A )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
A: Optional[int] = hidden_states.reshape(A , A , A , A )
A: Optional[int] = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=A )
| 135
| 0
|
'''simple docstring'''
__SCREAMING_SNAKE_CASE :List[Any] = [
'''Audio''',
'''Array2D''',
'''Array3D''',
'''Array4D''',
'''Array5D''',
'''ClassLabel''',
'''Features''',
'''Sequence''',
'''Value''',
'''Image''',
'''Translation''',
'''TranslationVariableLanguages''',
]
from .audio import Audio
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 119
|
'''simple docstring'''
from __future__ import annotations
__SCREAMING_SNAKE_CASE :Tuple = list[tuple[int, int]]
__SCREAMING_SNAKE_CASE :Tuple = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__SCREAMING_SNAKE_CASE :Any = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class A_ :
def __init__( self : List[Any] , snake_case_ : int , snake_case_ : int , snake_case_ : int , snake_case_ : int , snake_case_ : float , snake_case_ : Node | None , ):
_UpperCAmelCase = pos_x
_UpperCAmelCase = pos_y
_UpperCAmelCase = (pos_y, pos_x)
_UpperCAmelCase = goal_x
_UpperCAmelCase = goal_y
_UpperCAmelCase = g_cost
_UpperCAmelCase = parent
_UpperCAmelCase = self.calculate_heuristic()
def lowercase ( self : List[Any] ):
_UpperCAmelCase = abs(self.pos_x - self.goal_x )
_UpperCAmelCase = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self : List[str] , snake_case_ : List[Any] ):
return self.f_cost < other.f_cost
class A_ :
def __init__( self : Tuple , snake_case_ : tuple[int, int] , snake_case_ : tuple[int, int] ):
_UpperCAmelCase = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , snake_case_ )
_UpperCAmelCase = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , snake_case_ )
_UpperCAmelCase = [self.start]
_UpperCAmelCase = []
_UpperCAmelCase = False
def lowercase ( self : int ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
_UpperCAmelCase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
_UpperCAmelCase = True
return self.retrace_path(snake_case_ )
self.closed_nodes.append(snake_case_ )
_UpperCAmelCase = self.get_successors(snake_case_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(snake_case_ )
else:
# retrieve the best current path
_UpperCAmelCase = self.open_nodes.pop(self.open_nodes.index(snake_case_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(snake_case_ )
else:
self.open_nodes.append(snake_case_ )
if not self.reached:
return [self.start.pos]
return None
def lowercase ( self : List[str] , snake_case_ : Node ):
_UpperCAmelCase = []
for action in delta:
_UpperCAmelCase = parent.pos_x + action[1]
_UpperCAmelCase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(snake_case_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
snake_case_ , snake_case_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , snake_case_ , ) )
return successors
def lowercase ( self : Any , snake_case_ : Node | None ):
_UpperCAmelCase = node
_UpperCAmelCase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
_UpperCAmelCase = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE :int = (0, 0)
__SCREAMING_SNAKE_CASE :Optional[int] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print('''------''')
__SCREAMING_SNAKE_CASE :Union[str, Any] = GreedyBestFirst(init, goal)
__SCREAMING_SNAKE_CASE :Optional[int] = greedy_bf.search()
if path:
for pos_x, pos_y in path:
__SCREAMING_SNAKE_CASE :Dict = 2
for elem in grid:
print(elem)
| 119
| 1
|
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
'''simple docstring'''
__lowercase : List[str] = CLIPTokenizer
__lowercase : Tuple = CLIPTokenizerFast
__lowercase : str = True
__lowercase : Optional[int] = {}
__lowercase : str = False
def UpperCAmelCase_ ( self ) -> Tuple:
super().setUp()
# fmt: off
lowerCAmelCase__ : List[Any] = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
lowerCAmelCase__ : Union[str, Any] = dict(zip(__UpperCAmelCase ,range(len(__UpperCAmelCase ) ) ) )
lowerCAmelCase__ : Union[str, Any] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""]
lowerCAmelCase__ : Dict = {"""unk_token""": """<unk>"""}
lowerCAmelCase__ : Optional[int] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCAmelCase__ : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__UpperCAmelCase ) )
def UpperCAmelCase_ ( self ,**__UpperCAmelCase ) -> Any:
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**__UpperCAmelCase )
def UpperCAmelCase_ ( self ,**__UpperCAmelCase ) -> List[str]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**__UpperCAmelCase )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[Any]:
lowerCAmelCase__ : str = """lower newer"""
lowerCAmelCase__ : str = """lower newer"""
return input_text, output_text
def UpperCAmelCase_ ( self ) -> Tuple:
lowerCAmelCase__ : str = CLIPTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
lowerCAmelCase__ : Optional[int] = """lower newer"""
lowerCAmelCase__ : Optional[Any] = ["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""]
lowerCAmelCase__ : str = tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase ,__UpperCAmelCase )
lowerCAmelCase__ : Tuple = tokens + [tokenizer.unk_token]
lowerCAmelCase__ : int = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) ,__UpperCAmelCase )
@require_ftfy
def UpperCAmelCase_ ( self ) -> List[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase__ : Optional[int] = self.tokenizer_class.from_pretrained(__UpperCAmelCase ,**__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(__UpperCAmelCase ,**__UpperCAmelCase )
lowerCAmelCase__ : str = """A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."""
lowerCAmelCase__ : List[str] = tokenizer_s.tokenize(__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = tokenizer_r.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase ,__UpperCAmelCase )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
lowerCAmelCase__ : List[Any] = """xa\u0303y""" + """ """ + """x\xe3y"""
lowerCAmelCase__ : List[str] = tokenizer_s.tokenize(__UpperCAmelCase )
lowerCAmelCase__ : Any = tokenizer_r.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase ,__UpperCAmelCase )
# Test that the tokenization is identical on unicode of space type
lowerCAmelCase__ : List[Any] = [
"""\u0009""", # (horizontal tab, '\t')
"""\u000B""", # (vertical tab)
"""\u000C""", # (form feed)
"""\u0020""", # (space, ' ')
"""\u200E""", # (left-to-right mark):w
"""\u200F""", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
lowerCAmelCase__ : Optional[Any] = tokenizer_s.tokenize(__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = tokenizer_r.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase ,__UpperCAmelCase )
# Test that the tokenization is identical on unicode of line break type
lowerCAmelCase__ : Union[str, Any] = [
"""\u000A""", # (line feed, '\n')
"""\r\n""", # (carriage return and line feed, '\r\n')
"""\u000D""", # (carriage return, '\r')
"""\r""", # (carriage return, '\r')
"""\u000D""", # (carriage return, '\r')
"""\u2028""", # (line separator)
"""\u2029""", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
lowerCAmelCase__ : int = tokenizer_s.tokenize(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = tokenizer_r.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase ,__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> Optional[int]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase__ : Dict = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
lowerCAmelCase__ : List[Any] = F"""{text_of_1_token} {text_of_1_token}"""
lowerCAmelCase__ : int = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase ,use_fast=__UpperCAmelCase ,)
lowerCAmelCase__ : Tuple = tokenizer_r(__UpperCAmelCase ,return_offsets_mapping=__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(__UpperCAmelCase ) + 1, len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) ,)
lowerCAmelCase__ : Any = F""" {text}"""
lowerCAmelCase__ : Any = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase ,use_fast=__UpperCAmelCase ,)
lowerCAmelCase__ : List[Any] = tokenizer_r(__UpperCAmelCase ,return_offsets_mapping=__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] ,(1, 1 + len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(__UpperCAmelCase ) + 1, 1 + len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) ,)
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(__UpperCAmelCase ) as context:
self.rust_tokenizer_class.from_pretrained("""robot-test/old-clip-tokenizer""" )
self.assertTrue(
context.exception.args[0].startswith(
"""The `backend_tokenizer` provided does not match the expected format.""" ) )
@require_ftfy
def UpperCAmelCase_ ( self ) -> List[Any]:
super().test_tokenization_python_rust_equals()
def UpperCAmelCase_ ( self ) -> int:
# CLIP always lower cases letters
pass
| 565
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
if "resnet-50" in model_name:
lowerCAmelCase__ : int = ResNetConfig.from_pretrained("""microsoft/resnet-50""" )
elif "resnet-101" in model_name:
lowerCAmelCase__ : Dict = ResNetConfig.from_pretrained("""microsoft/resnet-101""" )
else:
raise ValueError("""Model name should include either resnet50 or resnet101""" )
lowerCAmelCase__ : Tuple = DetrConfig(use_timm_backbone=UpperCamelCase , backbone_config=UpperCamelCase )
# set label attributes
lowerCAmelCase__ : str = """panoptic""" in model_name
if is_panoptic:
lowerCAmelCase__ : Union[str, Any] = 250
else:
lowerCAmelCase__ : Union[str, Any] = 91
lowerCAmelCase__ : Optional[Any] = """huggingface/label-files"""
lowerCAmelCase__ : int = """coco-detection-id2label.json"""
lowerCAmelCase__ : Dict = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type="""dataset""" ) , """r""" ) )
lowerCAmelCase__ : Any = {int(UpperCamelCase ): v for k, v in idalabel.items()}
lowerCAmelCase__ : Union[str, Any] = idalabel
lowerCAmelCase__ : str = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : List[str] = []
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.conv1.weight""", """backbone.conv_encoder.model.embedder.embedder.convolution.weight""") )
rename_keys.append(("""backbone.0.body.bn1.weight""", """backbone.conv_encoder.model.embedder.embedder.normalization.weight""") )
rename_keys.append(("""backbone.0.body.bn1.bias""", """backbone.conv_encoder.model.embedder.embedder.normalization.bias""") )
rename_keys.append(("""backbone.0.body.bn1.running_mean""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_mean""") )
rename_keys.append(("""backbone.0.body.bn1.running_var""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_var""") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""",
f"""encoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias""") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""",
f"""decoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
) )
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
) )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias""") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
] )
return rename_keys
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = state_dict.pop(UpperCamelCase )
lowerCAmelCase__ : Any = val
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase=False ):
"""simple docstring"""
lowerCAmelCase__ : List[Any] = """"""
if is_panoptic:
lowerCAmelCase__ : Union[str, Any] = """detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
lowerCAmelCase__ : Optional[int] = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
lowerCAmelCase__ : Optional[Any] = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase__ : Union[str, Any] = in_proj_weight[:256, :]
lowerCAmelCase__ : Optional[int] = in_proj_bias[:256]
lowerCAmelCase__ : Union[str, Any] = in_proj_weight[256:512, :]
lowerCAmelCase__ : List[Any] = in_proj_bias[256:512]
lowerCAmelCase__ : Optional[int] = in_proj_weight[-256:, :]
lowerCAmelCase__ : Optional[Any] = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
lowerCAmelCase__ : List[str] = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
lowerCAmelCase__ : int = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase__ : List[str] = in_proj_weight[:256, :]
lowerCAmelCase__ : Any = in_proj_bias[:256]
lowerCAmelCase__ : Optional[Any] = in_proj_weight[256:512, :]
lowerCAmelCase__ : Optional[int] = in_proj_bias[256:512]
lowerCAmelCase__ : List[Any] = in_proj_weight[-256:, :]
lowerCAmelCase__ : Any = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
lowerCAmelCase__ : Union[str, Any] = state_dict.pop(
f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" )
lowerCAmelCase__ : Dict = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
lowerCAmelCase__ : Union[str, Any] = in_proj_weight_cross_attn[:256, :]
lowerCAmelCase__ : Tuple = in_proj_bias_cross_attn[:256]
lowerCAmelCase__ : str = in_proj_weight_cross_attn[256:512, :]
lowerCAmelCase__ : Optional[int] = in_proj_bias_cross_attn[256:512]
lowerCAmelCase__ : Optional[Any] = in_proj_weight_cross_attn[-256:, :]
lowerCAmelCase__ : Optional[int] = in_proj_bias_cross_attn[-256:]
def _SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowerCAmelCase__ : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCAmelCase__ : Optional[int] = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw )
return im
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase=None , UpperCamelCase=False ):
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = get_detr_config(UpperCamelCase )
# load original model from torch hub
lowerCAmelCase__ : Union[str, Any] = {
"""detr-resnet-50""": """detr_resnet50""",
"""detr-resnet-101""": """detr_resnet101""",
}
logger.info(f"""Converting model {model_name}...""" )
lowerCAmelCase__ : List[Any] = torch.hub.load("""facebookresearch/detr""" , model_name_to_original_name[model_name] , pretrained=UpperCamelCase ).eval()
lowerCAmelCase__ : str = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(UpperCamelCase ):
if is_panoptic:
lowerCAmelCase__ : List[str] = """detr.""" + src
rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(UpperCamelCase , is_panoptic=UpperCamelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
lowerCAmelCase__ : int = """detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
lowerCAmelCase__ : Optional[Any] = state_dict.pop(UpperCamelCase )
lowerCAmelCase__ : Tuple = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
lowerCAmelCase__ : Any = state_dict.pop(UpperCamelCase )
lowerCAmelCase__ : List[str] = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
lowerCAmelCase__ : List[Any] = state_dict.pop(UpperCamelCase )
lowerCAmelCase__ : Dict = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
lowerCAmelCase__ : Dict = state_dict.pop(UpperCamelCase )
lowerCAmelCase__ : Dict = val
# finally, create HuggingFace model and load state dict
lowerCAmelCase__ : int = DetrForSegmentation(UpperCamelCase ) if is_panoptic else DetrForObjectDetection(UpperCamelCase )
model.load_state_dict(UpperCamelCase )
model.eval()
# verify our conversion on an image
lowerCAmelCase__ : Union[str, Any] = """coco_panoptic""" if is_panoptic else """coco_detection"""
lowerCAmelCase__ : List[Any] = DetrImageProcessor(format=UpperCamelCase )
lowerCAmelCase__ : Optional[int] = processor(images=prepare_img() , return_tensors="""pt""" )
lowerCAmelCase__ : Union[str, Any] = encoding["""pixel_values"""]
lowerCAmelCase__ : List[Any] = detr(UpperCamelCase )
lowerCAmelCase__ : List[Any] = model(UpperCamelCase )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1e-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1e-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
model.save_pretrained(UpperCamelCase )
processor.save_pretrained(UpperCamelCase )
if push_to_hub:
# Upload model and image processor to the hub
logger.info("""Uploading PyTorch model and image processor to the hub...""" )
model.push_to_hub(f"""nielsr/{model_name}""" )
processor.push_to_hub(f"""nielsr/{model_name}""" )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''detr-resnet-50''',
type=str,
choices=['''detr-resnet-50''', '''detr-resnet-101'''],
help='''Name of the DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub or not.''')
_lowerCAmelCase = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 565
| 1
|
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 700
|
'''simple docstring'''
def binary_or(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive')
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int('1' in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
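A short usage sketch of binary_or as fixed above:

# 25 = 0b11001 and 32 = 0b100000, so the OR is 0b111001 (decimal 57).
print(binary_or(25, 32))  # '0b111001'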
| 418
| 0
|
def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
| 181
|
from __future__ import annotations

import unittest

import numpy as np


def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray | None = None,
) -> np.ndarray:
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )

    return mat_c - mat_b.T @ a_inv @ mat_b


class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)
        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            # A and B are swapped on purpose so the dimension check fails.
            schur_complement(b, a, c)

    def test_improper_b_c_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
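The first test above relies on the block-determinant identity det([[A, B], [B^T, C]]) = det(A) * det(S), where S = C - B^T A^{-1} B is the Schur complement of A. A tiny worked instance with illustrative numbers:

# With A = diag(2, 3), B = [1, 1]^T, C = [4]: S = 4 - (1/2 + 1/3) = 19/6,
# and det(M) = 19 = det(A) * det(S) = 6 * (19/6).
import numpy as np

a = np.diag([2.0, 3.0])
b = np.array([[1.0], [1.0]])
c = np.array([[4.0]])
s = c - b.T @ np.linalg.inv(a) @ b
m = np.block([[a, b], [b.T, c]])
assert np.isclose(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s))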
| 57
| 0
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( __lowerCAmelCase ):
'''simple docstring'''
__UpperCAmelCase = "encoder-decoder"
__UpperCAmelCase = True
def __init__(self , **lowerCAmelCase__ ):
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
_UpperCamelCase : str = kwargs.pop("encoder" )
_UpperCamelCase : Optional[int] = encoder_config.pop("model_type" )
_UpperCamelCase : Dict = kwargs.pop("decoder" )
_UpperCamelCase : Optional[Any] = decoder_config.pop("model_type" )
from ..auto.configuration_auto import AutoConfig
_UpperCamelCase : Any = AutoConfig.for_model(lowerCamelCase__ , **lowerCamelCase__ )
_UpperCamelCase : Optional[int] = AutoConfig.for_model(lowerCamelCase__ , **lowerCamelCase__ )
_UpperCamelCase : Dict = True
@classmethod
def lowercase_ (cls , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ):
'''simple docstring'''
logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config" )
_UpperCamelCase : Any = True
_UpperCamelCase : Any = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **lowerCamelCase__ )
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : List[Any] = copy.deepcopy(self.__dict__ )
_UpperCamelCase : Dict = self.encoder.to_dict()
_UpperCamelCase : int = self.decoder.to_dict()
_UpperCamelCase : Optional[int] = self.__class__.model_type
return output
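The obfuscated class above corresponds to transformers' EncoderDecoderConfig; a hedged usage sketch with the public API (the model ids are illustrative):

from transformers import AutoConfig, EncoderDecoderConfig

encoder_cfg = AutoConfig.from_pretrained("bert-base-uncased")
decoder_cfg = AutoConfig.from_pretrained("bert-base-uncased")
# Mirrors the classmethod above: the decoder gets is_decoder=True and add_cross_attention=True.
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_cfg, decoder_cfg)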
| 718
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"""facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCAmelCase = """vit_mae"""
def __init__(self , lowerCAmelCase__=7_68 , lowerCAmelCase__=12 , lowerCAmelCase__=12 , lowerCAmelCase__=30_72 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-12 , lowerCAmelCase__=2_24 , lowerCAmelCase__=16 , lowerCAmelCase__=3 , lowerCAmelCase__=True , lowerCAmelCase__=16 , lowerCAmelCase__=5_12 , lowerCAmelCase__=8 , lowerCAmelCase__=20_48 , lowerCAmelCase__=0.75 , lowerCAmelCase__=False , **lowerCAmelCase__ , ):
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
_UpperCamelCase : Union[str, Any] = hidden_size
_UpperCamelCase : Optional[int] = num_hidden_layers
_UpperCamelCase : str = num_attention_heads
_UpperCamelCase : Tuple = intermediate_size
_UpperCamelCase : Tuple = hidden_act
_UpperCamelCase : Optional[Any] = hidden_dropout_prob
_UpperCamelCase : int = attention_probs_dropout_prob
_UpperCamelCase : Union[str, Any] = initializer_range
_UpperCamelCase : Tuple = layer_norm_eps
_UpperCamelCase : Dict = image_size
_UpperCamelCase : Union[str, Any] = patch_size
_UpperCamelCase : List[Any] = num_channels
_UpperCamelCase : Optional[int] = qkv_bias
_UpperCamelCase : List[str] = decoder_num_attention_heads
_UpperCamelCase : int = decoder_hidden_size
_UpperCamelCase : Dict = decoder_num_hidden_layers
_UpperCamelCase : Dict = decoder_intermediate_size
_UpperCamelCase : str = mask_ratio
_UpperCamelCase : List[str] = norm_pix_loss
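A hedged usage sketch of the corresponding public class, ViTMAEConfig; that the defaults mirror the facebook/vit-mae-base checkpoint is an assumption based on the values above.

from transformers import ViTMAEConfig, ViTMAEModel

config = ViTMAEConfig()  # hidden_size=768, 12 layers, mask_ratio=0.75, as in the defaults above
model = ViTMAEModel(config)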
| 239
| 0
|