| code (string, 82–53.2k chars) | code_codestyle (int64, 0–721) | style_context (string, 91–41.9k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}


class VanConfig(PretrainedConfig):
    """Configuration for a VAN (Visual Attention Network) model."""

    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
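A minimal usage sketch for the config class above (hedged: it assumes the surrounding transformers package layout, and the values shown follow from the defaults reconstructed here):

config = VanConfig(image_size=224)
print(config.model_type)  # "van"
print(config.depths)      # [3, 3, 12, 3]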
# [dataset row cell: code_codestyle = 55]
import requests

giphy_api_key = "YOUR API KEY"


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Get a list of GIF URLs from the Giphy search API for a given query."""
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]


if __name__ == "__main__":
    print("\n".join(get_gifs("space ship")))
# [dataset row cells: style_context_codestyle = 55, label = 1]
import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
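Saved as a standalone script, the converter above would be invoked roughly like this (the script and output names are illustrative, not from the source):

# python convert_xglm_checkpoint.py /path/to/fairseq/model.pt ./xglm-converted
# afterwards: XGLMForCausalLM.from_pretrained("./xglm-converted")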
# [dataset row cell: code_codestyle = 172]
def greatest_common_divisor(a: int, b: int) -> int:
    """Recursive Euclidean algorithm."""
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main():
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")


if __name__ == "__main__":
    main()
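A quick sanity check on the two implementations above:

assert greatest_common_divisor(24, 36) == 12
assert gcd_by_iterative(24, 36) == 12
assert greatest_common_divisor(0, 7) == 7  # gcd(0, n) == n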
# [dataset row cells: style_context_codestyle = 172, label = 1]
from __future__ import annotations

import requests


def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top max_stories posts from HackerNews."""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
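Each output line is one markdown bullet; with illustrative (not real) values it looks like:

# * [An interesting story](https://example.com/article)
# * [Another story](https://example.com/other)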
# [dataset row cell: code_codestyle = 330]
import unittest

from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers.models.esm.modeling_esmfold import EsmForProteinFolding


class EsmFoldModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, sequence_labels, token_labels, choice_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    # NOTE: the test-method names below are reconstructed from the skip reasons;
    # the obfuscated source had lost the original identifiers.
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip("Does not support attention outputs")
    def test_attention_outputs(self):
        pass

    @unittest.skip
    def test_correct_missing_keys(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("ESMFold does not support passing input embeds!")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_integration(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_config_init(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_pretrained(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_headmasking(self):
        pass

    @unittest.skip("ESMFold does not output hidden states in the normal way.")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("ESMfold does not output hidden states in the normal way.")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip("ESMFold only has one output format.")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip("ESMFold does not support input chunking.")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.")
    def test_initialization(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_hidden_state(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_simple(self):
        pass

    @unittest.skip("ESMFold doesn't support data parallel.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
# [dataset row cells: style_context_codestyle = 330, label = 1]
import copy
from typing import Any, Dict, List, Optional, Union

import numpy as np
import torch

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48_000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14_000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T

    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion

    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than
                    # max_length + hop_length. In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
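A hedged usage sketch for the extractor above: a silent 3-second mono clip at the default 48 kHz rate, processed with the default "fusion"/"repeatpad" settings (shapes follow from the code as reconstructed here):

import numpy as np

extractor = ClapFeatureExtractor()
waveform = np.zeros(3 * 48_000)  # 3 s of silence, mono, 48 kHz
features = extractor(waveform, sampling_rate=48_000, return_tensors="np")
print(features["input_features"].shape)  # roughly (1, 4, 1001, 64) with these defaults
print(features["is_longer"])             # [[True]]: with one short clip, the random "longer" promotion always selects it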
# [dataset row cell: code_codestyle = 515]
import fire

from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
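`fire.Fire` exposes the function signature on the command line, so a hedged invocation (file names are illustrative) looks like:

# python rouge_cli.py predictions.txt references.txt --save_path=metrics.json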
# [dataset row cells: style_context_codestyle = 515, label = 1]
import json
import logging
import os
import sys
from pathlib import Path

import finetune_rag

from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
    TestCasePlus,
    execute_subprocess_async,
    require_ray,
    require_torch_gpu,
    require_torch_multi_gpu,
)


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
            --data_dir {data_dir}
            --output_dir {output_dir}
            --model_name_or_path facebook/rag-sequence-base
            --model_type rag_sequence
            --do_train
            --do_predict
            --n_val -1
            --val_check_interval 1.0
            --train_batch_size 2
            --eval_batch_size 1
            --max_source_length 25
            --max_target_length 25
            --val_max_target_length 25
            --test_max_target_length 25
            --label_smoothing 0.1
            --dropout 0.1
            --attention_dropout 0.1
            --weight_decay 0.001
            --adam_epsilon 1e-08
            --max_grad_norm 0.1
            --lr_scheduler polynomial
            --learning_rate 3e-04
            --num_train_epochs 1
            --warmup_steps 4
            --gradient_accumulation_steps 1
            --distributed-port 8787
            --use_dummy_dataset 1
            --distributed_retriever {distributed_retriever}
            """.split()

        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result

    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
# [dataset row cell: code_codestyle = 452]
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}


class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor

        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
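A small sketch instantiating the config above (run inside the transformers package; the values shown follow directly from the code):

config = BitConfig(layer_type="preactivation", global_padding="same")
print(config.global_padding)  # "SAME" -- normalized to upper case
print(config.stage_names)     # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']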
# [dataset row cells: style_context_codestyle = 137, label = 0]
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union

import numpy as np

from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase


def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            # left padding: place the (possibly truncated) sequence at the end of the row
            if isinstance(padding_value, tuple):
                out_tensor[i, sequence_length - len(tensor[:sequence_length]) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, sequence_length - len(tensor[:sequence_length]) :] = tensor[:sequence_length]

    return out_tensor.tolist()


def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False


@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # conversion to tensors would fail here: the labels are not all the same length yet
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
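A worked example for `padding_tensor` above (a sketch; plain lists stand in for tag sequences):

rows = [[7, 8, 9], [4]]
print(padding_tensor(rows, -1, "right", 4))  # [[7, 8, 9, -1], [4, -1, -1, -1]]
print(padding_tensor(rows, -1, "left", 4))   # [[-1, 7, 8, 9], [-1, -1, -1, 4]]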
# [dataset row cell: code_codestyle = 50]
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
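What the `_LazyModule` indirection buys, sketched (this assumes transformers with torch installed): importing the package module is cheap, and the heavy modeling file is only imported on first attribute access.

import transformers.models.gpt_bigcode as gpt_bigcode  # fast: only the lazy shim loads

model_cls = gpt_bigcode.GPTBigCodeForCausalLM  # first access triggers the real import of modeling_gpt_bigcode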
# [dataset row cells: style_context_codestyle = 50, label = 1]
"""simple docstring"""
from math import factorial
def _UpperCAmelCase ( lowerCamelCase__ = 100 ):
"""simple docstring"""
return sum(int(lowerCamelCase__ ) for x in str(factorial(lowerCamelCase__ ) ) )
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
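Worked example: 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so:

assert solution(10) == 27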
# [dataset row cell: code_codestyle = 644]
"""simple docstring"""
from itertools import permutations
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
lowerCAmelCase__ = [7, 11, 13, 17]
for i, test in enumerate(lowerCamelCase__ ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def _UpperCAmelCase ( lowerCamelCase__ = 10 ):
"""simple docstring"""
return sum(
int("""""".join(map(lowerCamelCase__ , lowerCamelCase__ ) ) )
for num in permutations(range(lowerCamelCase__ ) )
if is_substring_divisible(lowerCamelCase__ ) )
if __name__ == "__main__":
print(F"{solution() = }")
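As a check: 1406357289 has the property (d4 = 6 is even; d3 + d4 + d5 = 0 + 6 + 3 is divisible by 3; d6 = 5; and 357, 572, 728, 289 are divisible by 7, 11, 13, 17 respectively):

assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))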
# [dataset row cells: style_context_codestyle = 644, label = 1]
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
__lowercase = {
'''debug''': logging.DEBUG,
'''info''': logging.INFO,
'''warning''': logging.WARNING,
'''error''': logging.ERROR,
'''critical''': logging.CRITICAL,
}
__lowercase = logging.WARNING
def lowerCAmelCase ():
"""simple docstring"""
__UpperCamelCase =os.getenv('''DATASETS_VERBOSITY''' , __UpperCamelCase )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F"""Unknown option DATASETS_VERBOSITY={env_level_str}, """
F"""has to be one of: { ", ".join(log_levels.keys() ) }""" )
return _default_log_level
def lowerCAmelCase ():
"""simple docstring"""
return __name__.split('''.''' )[0]
def lowerCAmelCase ():
"""simple docstring"""
return logging.getLogger(_get_library_name() )
def lowerCAmelCase ():
"""simple docstring"""
__UpperCamelCase =_get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def lowerCAmelCase ():
"""simple docstring"""
__UpperCamelCase =_get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def lowerCAmelCase (__UpperCamelCase : Optional[str] = None ):
"""simple docstring"""
if name is None:
__UpperCamelCase =_get_library_name()
return logging.getLogger(__UpperCamelCase )
def lowerCAmelCase ():
"""simple docstring"""
return _get_library_root_logger().getEffectiveLevel()
def lowerCAmelCase (__UpperCamelCase : int ):
"""simple docstring"""
_get_library_root_logger().setLevel(__UpperCamelCase )
def lowerCAmelCase ():
"""simple docstring"""
return set_verbosity(__UpperCamelCase )
def lowerCAmelCase ():
"""simple docstring"""
return set_verbosity(__UpperCamelCase )
def lowerCAmelCase ():
"""simple docstring"""
return set_verbosity(__UpperCamelCase )
def lowerCAmelCase ():
"""simple docstring"""
return set_verbosity(__UpperCamelCase )
def lowerCAmelCase ():
"""simple docstring"""
__UpperCamelCase =False
def lowerCAmelCase ():
"""simple docstring"""
__UpperCamelCase =True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class _lowercase :
"""simple docstring"""
def __init__( self : Tuple , *UpperCamelCase__ : Dict , **UpperCamelCase__ : List[Any] ) -> Optional[Any]: # pylint: disable=unused-argument
'''simple docstring'''
__UpperCamelCase =args[0] if args else None
def __iter__( self : List[str] ) -> List[Any]:
'''simple docstring'''
return iter(self._iterator )
def __getattr__( self : Any , UpperCamelCase__ : List[Any] ) -> Dict:
'''simple docstring'''
def empty_fn(*UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : List[str] ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : Optional[int] ) -> int:
'''simple docstring'''
return self
def __exit__( self : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Any , UpperCamelCase__ : str ) -> Any:
'''simple docstring'''
return
__lowercase = True
class _lowercase :
"""simple docstring"""
def __call__( self : int , *UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any]=False , **UpperCamelCase__ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
if _tqdm_active and not disable:
return tqdm_lib.tqdm(*UpperCamelCase__ , **UpperCamelCase__ )
else:
return EmptyTqdm(*UpperCamelCase__ , **UpperCamelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Tuple ) -> int:
'''simple docstring'''
__UpperCamelCase =None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*UpperCamelCase__ , **UpperCamelCase__ )
def UpperCAmelCase_ ( self : int ) -> Any:
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
__lowercase = _tqdm_cls()
def lowerCAmelCase ():
"""simple docstring"""
global _tqdm_active
return bool(_tqdm_active )
def lowerCAmelCase ():
"""simple docstring"""
global _tqdm_active
__UpperCamelCase =True
def lowerCAmelCase ():
"""simple docstring"""
global _tqdm_active
__UpperCamelCase =False
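A short usage sketch for the helpers above (assuming they are imported from this module):

set_verbosity_error()       # only ERROR and above reach the library's root logger
disable_progress_bar()      # tqdm(...) now returns the no-op EmptyTqdm
for _ in tqdm(range(3)):    # still iterates, but renders no bar
    pass
enable_progress_bar()
set_verbosity_warning()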
# [dataset row cell: code_codestyle = 296]
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowercase ( __a , unittest.TestCase ):
"""simple docstring"""
lowercase__ = MgpstrTokenizer
lowercase__ = False
lowercase__ = {}
lowercase__ = False
def UpperCAmelCase_ ( self : int ) -> Tuple:
'''simple docstring'''
super().setUp()
# fmt: off
__UpperCamelCase =['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
__UpperCamelCase =dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + '''\n''' )
def UpperCAmelCase_ ( self : str , **UpperCamelCase__ : str ) -> Optional[int]:
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def UpperCAmelCase_ ( self : List[Any] , UpperCamelCase__ : int ) -> Dict:
'''simple docstring'''
__UpperCamelCase ='''tester'''
__UpperCamelCase ='''tester'''
return input_text, output_text
@unittest.skip('''MGP-STR always lower cases letters.''' )
def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self : Tuple ) -> Dict:
'''simple docstring'''
__UpperCamelCase =self.get_tokenizers(do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
__UpperCamelCase ='''[SPECIAL_TOKEN]'''
tokenizer.add_special_tokens({'''cls_token''': special_token} )
__UpperCamelCase =tokenizer.encode([special_token] , add_special_tokens=UpperCamelCase__ )
self.assertEqual(len(UpperCamelCase__ ) , 1 )
__UpperCamelCase =tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
self.assertTrue(special_token not in decoded )
def UpperCAmelCase_ ( self : int ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
__UpperCamelCase , __UpperCamelCase =self.get_input_output_texts(UpperCamelCase__ )
__UpperCamelCase =tokenizer.tokenize(UpperCamelCase__ )
__UpperCamelCase =tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
__UpperCamelCase =tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
__UpperCamelCase =tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
self.assertNotEqual(len(UpperCamelCase__ ) , 0 )
__UpperCamelCase =tokenizer.decode(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(text_a.replace(''' ''' , '''''' ) , UpperCamelCase__ )
@unittest.skip('''MGP-STR tokenizer only handles one sequence.''' )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' )
def UpperCAmelCase_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
pass
# [dataset row cells: style_context_codestyle = 296, label = 1]
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# [dataset row cell: code_codestyle = 519]
from math import atan, cos, radians, sin, tan

from .haversine_distance import haversine_distance


AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
EQUATORIAL_RADIUS = 6_378_137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """
    Calculate the shortest distance along the surface of an ellipsoid between two
    points on the surface of earth, given their latitudes and longitudes.
    """
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
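An illustrative call, run from inside the geodesy package so the relative `haversine_distance` import resolves (San Francisco to New York; the result should be on the order of 4.1 million meters):

SAN_FRANCISCO = (37.774856, -122.424227)
NEW_YORK = (40.713019, -74.012647)
print(lamberts_ellipsoidal_distance(*SAN_FRANCISCO, *NEW_YORK))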
# [dataset row cells: style_context_codestyle = 519, label = 1]
import os


def solution():
    """
    Find the maximum total along a top-to-bottom path in the triangle stored
    in triangle.txt, using a row-by-row dynamic program.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])


if __name__ == "__main__":
    print(solution())
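The same row-by-row DP on a small in-memory triangle, as a sketch; the best path 3 → 7 → 4 → 9 sums to 23:

a = [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]
for i in range(1, len(a)):
    for j in range(len(a[i])):
        left = a[i - 1][j - 1] if j > 0 else 0
        right = a[i - 1][j] if j != len(a[i - 1]) else 0
        a[i][j] += max(left, right)
print(max(a[-1]))  # 23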
# [dataset row cell: code_codestyle = 721]
import gc
import unittest

from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = sd_pipe.prepare_inputs(prompt)
        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = sd_pipe.prepare_inputs(prompt)
        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
# [dataset row cells: style_context_codestyle = 53, label = 0]
from __future__ import annotations

from functools import lru_cache
from math import ceil


NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """
    Return a set of integers corresponding to unique prime partitions of
    number_to_partition; each partition is encoded as the product of its parts.
    """
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the smallest integer that can be written as the sum of primes in
    more than number_unique_partitions unique ways."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
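For instance, 10 has exactly five prime partitions (2+2+2+2+2, 2+2+3+3, 2+3+5, 3+7, 5+5), each encoded as a distinct product of its parts:

assert len(partition(10)) == 5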
# [dataset row cell: code_codestyle = 12]
'''simple docstring'''
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class a :
"""simple docstring"""
def __init__( self : Optional[int] , snake_case_ : str , snake_case_ : List[Any]=1_3 , snake_case_ : Any=6_4 , snake_case_ : Optional[int]=2 , snake_case_ : int=3 , snake_case_ : str=True , snake_case_ : Dict=True , snake_case_ : Optional[Any]=3_2 , snake_case_ : Optional[Any]=5 , snake_case_ : List[Any]=4 , snake_case_ : List[Any]=3_7 , snake_case_ : Dict="gelu" , snake_case_ : Union[str, Any]=0.1 , snake_case_ : int=0.1 , snake_case_ : int=1_0 , snake_case_ : int=0.0_2 , snake_case_ : List[Any]=[1, 1_6, 4, 4] , snake_case_ : Any=None , ):
'''simple docstring'''
snake_case__ : Dict = parent
snake_case__ : Optional[int] = batch_size
snake_case__ : Union[str, Any] = image_size
snake_case__ : str = patch_size
snake_case__ : Optional[Any] = num_channels
snake_case__ : List[str] = is_training
snake_case__ : List[Any] = use_labels
snake_case__ : List[str] = hidden_size
snake_case__ : List[Any] = num_hidden_layers
snake_case__ : str = num_attention_heads
snake_case__ : Dict = intermediate_size
snake_case__ : Tuple = hidden_act
snake_case__ : str = hidden_dropout_prob
snake_case__ : List[Any] = attention_probs_dropout_prob
snake_case__ : List[str] = type_sequence_label_size
snake_case__ : Any = initializer_range
snake_case__ : Optional[Any] = scope
snake_case__ : Optional[int] = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
snake_case__ : Dict = (self.image_size // 3_2) ** 2
snake_case__ : List[str] = num_patches + 1
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
snake_case__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : int = None
if self.use_labels:
snake_case__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ : Any = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : str ):
'''simple docstring'''
snake_case__ : str = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [4, 8, 1_6, 3_2],
'''num_groups''': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case_ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=snake_case_ , )
def __magic_name__ ( self : Optional[Any] , snake_case_ : str , snake_case_ : Any , snake_case_ : List[str] ):
'''simple docstring'''
snake_case__ : Dict = ViTHybridModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case__ : List[str] = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : str , snake_case_ : Any , snake_case_ : Tuple , snake_case_ : Any ):
'''simple docstring'''
snake_case__ : Optional[int] = self.type_sequence_label_size
snake_case__ : int = ViTHybridForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case__ : Optional[int] = model(snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __magic_name__ ( self : Union[str, Any] ):
'''simple docstring'''
snake_case__ : List[Any] = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ : str = config_and_inputs
snake_case__ : str = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
__UpperCAmelCase = (
{"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()
        self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
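

# Editor's sketch (not part of the original test file): the same inference path the
# integration test above exercises, runnable directly. The checkpoint name is taken from
# the accelerate test; the script downloads it on first use.
if __name__ == "__main__":
    processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
    model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384")
    inputs = processor(images=prepare_img(), return_tensors="pt")
    logits = model(**inputs).logits
    print(model.config.id2label[logits.argmax(-1).item()])  # expected: "tabby, tabby cat"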
| 347
| 0
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    """simple docstring"""

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
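

# Editor's sketch (not part of the original module): how this ONNX config is typically
# driven, i.e. instantiate it for a config, then produce dummy inputs for the export step.
# The checkpoint name is an assumption; any Marian checkpoint works the same way.
if __name__ == "__main__":
    from transformers import MarianTokenizer

    config = MarianConfig()
    onnx_config = MarianOnnxConfig(config, task="seq2seq-lm")
    tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
    dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
    print(sorted(dummy_inputs.keys()))  # attention_mask, decoder_attention_mask, decoder_input_ids, input_ids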
| 409
|
def check_bouncy(n: int) -> bool:
    """Return True if n is a bouncy number, i.e. its digits are neither
    monotonically non-decreasing nor monotonically non-increasing."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least number for which the proportion of bouncy numbers
    is at least the given percentage."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
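    # Editor's demo (facts from the Project Euler 112 statement): 101 is the smallest
    # bouncy number, and the bouncy proportion first reaches exactly 50% at 538.
    assert check_bouncy(101) and not check_bouncy(1234) and not check_bouncy(4321)
    assert solution(50) == 538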
| 409
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
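

# Editor's sketch (not part of the original module): constructing the config and reading
# back derived attributes; all values below follow from the defaults defined above.
if __name__ == "__main__":
    config = TransfoXLConfig(mem_len=800, clamp_len=400)
    print(config.model_type)  # "transfo-xl"
    print(config.tie_projs)  # [False, True, True, True] with the default three cutoffs
    print(config.max_position_embeddings)  # -1: no sequence length limit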
| 509
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class Data2VecVisionConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
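

# Editor's sketch (not part of the original module): the ONNX config only has to describe
# the single vision input; a quick check that it produces the expected axis mapping.
if __name__ == "__main__":
    cfg = Data2VecVisionConfig()
    onnx_cfg = Data2VecVisionOnnxConfig(cfg)
    print(dict(onnx_cfg.inputs))  # {'pixel_values': {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}}
    print(onnx_cfg.atol_for_validation)  # 1e-4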
| 509
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }
        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))
        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))
        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))
        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))
        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))
        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))
        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
| 702
|
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """
    Sieve of Eratosthenes: return every prime up to and including num.

    >>> prime_sieve_eratosthenes(10)
    [2, 3, 5, 7]
    """
    if num <= 0:
        raise ValueError("Input must be a positive integer")
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
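    # Editor's demo: quick correctness check on a small input.
    assert prime_sieve_eratosthenes(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]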
| 247
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 111
|
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def floyd(n):
    """Print the upper half of the diamond."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    """Print the lower half of the diamond."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """Print the full diamond, or a friendly message for non-positive input."""
    if n <= 0:
        print("       ...       ....        nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- | |-  |--| |\  /| |-")
    print(r"|/  \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
| 111
| 1
|
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
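# Editor's note: example invocation (script name and paths are placeholder assumptions;
# the flags come from the argument parser defined above):
#
#   python convert_xlnet_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet_pytorch \
#       --finetuning_task sts-b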
| 298
|
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 298
| 1
|
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}


def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Search Google Images for ``query`` and download up to ``max_images`` results."""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }
    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )
    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)
    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0
    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )
    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode("unicode-escape")
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode("unicode-escape")
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index


if __name__ == "__main__":
    try:
        image_count = download_images_from_google_query(sys.argv[1])
        print(f"{image_count} images were downloaded to disk.")
    except IndexError:
        print("Please provide a search term.")
raise
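# Editor's note: example usage from a shell (script name is an assumption; images are
# saved under ./query_<term>/ as implemented above):
#
#   python download_images_from_google_query.py "blue tit"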
| 373
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCAmelCase(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
| 373
| 1
|
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
_DESCRIPTION = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
_KWARGS_DESCRIPTION = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[List[str]]],
        references: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
| 39
|
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    """simple docstring"""
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
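

# Editor's note: once registered on the transformers CLI, the command above is invoked as
# follows (the subcommand name comes from parser.add_parser("download") above):
#
#   transformers-cli download --cache-dir ./models --force bert-base-uncased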
| 39
| 1
|
"""Second-order IIR (biquad) audio filters, after Robert Bristow-Johnson's audio EQ cookbook."""
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter


def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a band-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates an all-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a peak (bell) filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a

    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)

    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)

    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
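

if __name__ == "__main__":
    # A minimal usage sketch (not part of the original module): build the
    # low-pass filter above and run a block of samples through it. It assumes
    # IIRFilter exposes a per-sample `process` method, as in the accompanying
    # audio_filters.iir_filter module.
    samplerate = 48_000
    filt = make_lowpass(1_000, samplerate)
    tone = [sin(tau * 5_000 * n / samplerate) for n in range(480)]  # 5 kHz test tone
    filtered = [filt.process(sample) for sample in tone]  # strongly attenuated
    print(filtered[:4])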
| 379
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""naver-clova-ix/donut-base""": """https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json""",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''donut-swin'''
SCREAMING_SNAKE_CASE__ = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self : str , lowerCamelCase_ : Union[str, Any]=2_24 , lowerCamelCase_ : Union[str, Any]=4 , lowerCamelCase_ : Optional[Any]=3 , lowerCamelCase_ : Dict=96 , lowerCamelCase_ : int=[2, 2, 6, 2] , lowerCamelCase_ : Optional[Any]=[3, 6, 12, 24] , lowerCamelCase_ : List[str]=7 , lowerCamelCase_ : List[str]=4.0 , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : List[str]=0.0 , lowerCamelCase_ : Dict=0.0 , lowerCamelCase_ : Dict=0.1 , lowerCamelCase_ : Union[str, Any]="gelu" , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : Optional[int]=0.02 , lowerCamelCase_ : Any=1e-5 , **lowerCamelCase_ : Union[str, Any] , ):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : Dict = patch_size
SCREAMING_SNAKE_CASE : str = num_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = embed_dim
SCREAMING_SNAKE_CASE : Optional[int] = depths
SCREAMING_SNAKE_CASE : Union[str, Any] = len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = num_heads
SCREAMING_SNAKE_CASE : Optional[int] = window_size
SCREAMING_SNAKE_CASE : Optional[Any] = mlp_ratio
SCREAMING_SNAKE_CASE : Dict = qkv_bias
SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Dict = drop_path_rate
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : Union[str, Any] = use_absolute_embeddings
SCREAMING_SNAKE_CASE : Tuple = layer_norm_eps
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
SCREAMING_SNAKE_CASE : str = int(embed_dim * 2 ** (len(lowerCamelCase_ ) - 1) )
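
# Usage sketch (hedged): with the upstream `transformers` package, the class
# above is exposed as `DonutSwinConfig`, and `hidden_size` is derived from the
# last stage as embed_dim * 2 ** (len(depths) - 1):
#
#     from transformers import DonutSwinConfig
#     config = DonutSwinConfig()    # defaults: embed_dim=96, depths=[2, 2, 6, 2]
#     print(config.hidden_size)     # 768 == 96 * 2 ** 3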
| 379
| 1
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : List[Any] = logging.get_logger(__name__)
A : Tuple = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : int ="""pix2struct_text_model"""
__UpperCAmelCase : int =["""past_key_values"""]
__UpperCAmelCase : int ={
"""hidden_size""": """hidden_size""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self , __a=5_02_44 , __a=7_68 , __a=64 , __a=20_48 , __a=12 , __a=12 , __a=32 , __a=1_28 , __a=0.1 , __a=1e-6 , __a=1.0 , __a="gelu_new" , __a=0 , __a=False , __a=0 , __a=1 , __a=False , __a=True , **__a , ):
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = d_kv
__lowerCAmelCase = d_ff
__lowerCAmelCase = num_layers
__lowerCAmelCase = num_heads
__lowerCAmelCase = relative_attention_num_buckets
__lowerCAmelCase = relative_attention_max_distance
__lowerCAmelCase = dropout_rate
__lowerCAmelCase = layer_norm_epsilon
__lowerCAmelCase = initializer_factor
__lowerCAmelCase = use_cache
__lowerCAmelCase = eos_token_id
__lowerCAmelCase = decoder_start_token_id
# for backwards compatibility
__lowerCAmelCase = dense_act_fn
super().__init__(
pad_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , tie_word_embeddings=__a , is_decoder=__a , **__a , )
@classmethod
def snake_case ( cls , __a , **__a ):
cls._set_token_in_kwargs(__a )
__lowerCAmelCase , __lowerCAmelCase = cls.get_config_dict(__a , **__a )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
__lowerCAmelCase = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(__a , **__a )
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Any ="""pix2struct_vision_model"""
def __init__( self , __a=7_68 , __a=7_68 , __a=20_48 , __a=64 , __a=12 , __a=12 , __a="gelu_new" , __a=1e-6 , __a=0.0 , __a=0.0 , __a=1e-1_0 , __a=1.0 , __a=40_96 , __a=32 , __a=1_28 , **__a , ):
super().__init__(**__a )
__lowerCAmelCase = hidden_size
__lowerCAmelCase = patch_embed_hidden_size
__lowerCAmelCase = d_ff
__lowerCAmelCase = dropout_rate
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = initializer_range
__lowerCAmelCase = initializer_factor
__lowerCAmelCase = attention_dropout
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = dense_act_fn
__lowerCAmelCase = seq_len
__lowerCAmelCase = relative_attention_num_buckets
__lowerCAmelCase = relative_attention_max_distance
__lowerCAmelCase = d_kv
@classmethod
def snake_case ( cls , __a , **__a ):
cls._set_token_in_kwargs(__a )
__lowerCAmelCase , __lowerCAmelCase = cls.get_config_dict(__a , **__a )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
__lowerCAmelCase = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(__a , **__a )
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] ="""pix2struct"""
__UpperCAmelCase : Optional[int] =True
def __init__( self , __a=None , __a=None , __a=1.0 , __a=0.0_2 , __a=False , __a=False , __a=True , **__a , ):
super().__init__(tie_word_embeddings=__a , is_encoder_decoder=__a , **__a )
if text_config is None:
__lowerCAmelCase = {}
logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." )
if vision_config is None:
__lowerCAmelCase = {}
logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." )
__lowerCAmelCase = PixaStructTextConfig(**__a )
__lowerCAmelCase = PixaStructVisionConfig(**__a )
__lowerCAmelCase = self.text_config.decoder_start_token_id
__lowerCAmelCase = self.text_config.pad_token_id
__lowerCAmelCase = self.text_config.eos_token_id
__lowerCAmelCase = initializer_factor
__lowerCAmelCase = initializer_range
__lowerCAmelCase = self.initializer_range
__lowerCAmelCase = self.initializer_range
__lowerCAmelCase = is_vqa
@classmethod
def snake_case ( cls , __a , __a , **__a ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__a )
def snake_case ( self ):
__lowerCAmelCase = copy.deepcopy(self.__dict__ )
__lowerCAmelCase = self.text_config.to_dict()
__lowerCAmelCase = self.vision_config.to_dict()
__lowerCAmelCase = self.__class__.model_type
return output
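
# Usage sketch (hedged): upstream these three classes are exposed as
# Pix2StructTextConfig, Pix2StructVisionConfig, and Pix2StructConfig, and a
# composite config can be built from the two sub-configs via the classmethod
# shown above:
#
#     from transformers import (Pix2StructConfig, Pix2StructTextConfig,
#                               Pix2StructVisionConfig)
#     config = Pix2StructConfig.from_text_vision_configs(
#         Pix2StructTextConfig(), Pix2StructVisionConfig())
#     print(config.text_config.hidden_size)  # 768 by default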
| 282
|
"""simple docstring"""
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Returns how often each total occurs over all throws of the given dice."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    """Probability that Peter (nine 4-sided dice) out-rolls Colin (six 6-sided dice)."""
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4, dice_number=9
    )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6, dice_number=6
    )

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability
if __name__ == "__main__":
print(f'''{solution() = }''')
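
    # A light sanity check (added sketch, not part of the original script): the
    # frequency table over all throws of four 6-sided dice must cover 6**4
    # outcomes, and the result must be a probability strictly between 0 and 1.
    # (The published Project Euler 205 answer is 0.5731441.)
    assert sum(total_frequency_distribution(sides_number=6, dice_number=4)) == 6**4
    assert 0 < solution() < 1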
| 282
| 1
|
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int) -> None:
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list: list[int] = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
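
    # Deterministic check of the recursion above (added sketch, no stdin needed):
    sample = [5, 2, 4, 1, 3]
    rec_insertion_sort(sample, len(sample))
    assert sample == [1, 2, 3, 4, 5]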
| 226
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_SCREAMING_SNAKE_CASE : Optional[Any] = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'
def __lowerCAmelCase ( ):
_lowercase: List[Any] = _ask_options(
"In which compute environment are you running?" , ["This machine", "AWS (Amazon SageMaker)"] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
_lowercase: Any = get_sagemaker_input()
else:
_lowercase: Tuple = get_cluster_input()
return config
def __lowerCAmelCase ( __magic_name__=None ):
if subparsers is not None:
_lowercase: List[Any] = subparsers.add_parser("config" , description=__magic_name__ )
else:
_lowercase: List[Any] = argparse.ArgumentParser("Accelerate config command" , description=__magic_name__ )
parser.add_argument(
"--config_file" , default=__magic_name__ , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=__magic_name__ )
return parser
def __lowerCAmelCase ( __magic_name__ ):
_lowercase: Union[str, Any] = get_user_input()
if args.config_file is not None:
_lowercase: Dict = args.config_file
else:
if not os.path.isdir(__magic_name__ ):
os.makedirs(__magic_name__ )
_lowercase: Tuple = default_yaml_config_file
if config_file.endswith(".json" ):
config.to_json_file(__magic_name__ )
else:
config.to_yaml_file(__magic_name__ )
print(f"accelerate configuration saved at {config_file}" )
def __lowerCAmelCase ( ):
_lowercase: int = config_command_parser()
_lowercase: List[Any] = parser.parse_args()
config_command(__magic_name__ )
if __name__ == "__main__":
main()
| 226
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
import random
from typing import Any
class a :
"""simple docstring"""
def __init__( self : Optional[int] ) -> None:
__UpperCAmelCase : list[Any] = []
__UpperCAmelCase : int = 0
__UpperCAmelCase : int = 0
def lowerCamelCase__ ( self : List[str] ) -> bool:
return self.head == self.tail
def lowerCamelCase__ ( self : str , snake_case : Any ) -> None:
self.data.append(snake_case )
__UpperCAmelCase : Any = self.tail + 1
def lowerCamelCase__ ( self : Optional[Any] ) -> Any:
__UpperCAmelCase : int = self.data[self.head]
__UpperCAmelCase : int = self.head + 1
return ret
def lowerCamelCase__ ( self : Optional[int] ) -> int:
return self.tail - self.head
def lowerCamelCase__ ( self : str ) -> None:
print(self.data )
print('''**************''' )
print(self.data[self.head : self.tail] )
class a :
"""simple docstring"""
def __init__( self : Optional[Any] , snake_case : Any ) -> None:
__UpperCAmelCase : int = data
__UpperCAmelCase : MyNode | None = None
__UpperCAmelCase : MyNode | None = None
__UpperCAmelCase : int = 1
def lowerCamelCase__ ( self : str ) -> Any:
return self.data
def lowerCamelCase__ ( self : Any ) -> MyNode | None:
return self.left
def lowerCamelCase__ ( self : str ) -> MyNode | None:
return self.right
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
return self.height
def lowerCamelCase__ ( self : Tuple , snake_case : Any ) -> None:
__UpperCAmelCase : List[str] = data
def lowerCamelCase__ ( self : str , snake_case : MyNode | None ) -> None:
__UpperCAmelCase : int = node
def lowerCamelCase__ ( self : Union[str, Any] , snake_case : MyNode | None ) -> None:
__UpperCAmelCase : str = node
def lowerCamelCase__ ( self : Tuple , snake_case : int ) -> None:
__UpperCAmelCase : str = height
def _a ( _lowercase : MyNode | None ):
'''simple docstring'''
if node is None:
return 0
return node.get_height()
def _a ( _lowercase : int , _lowercase : int ):
'''simple docstring'''
if a > b:
return a
return b
def _a ( _lowercase : MyNode ):
'''simple docstring'''
print('''left rotation node:''' , node.get_data() )
__UpperCAmelCase : Any = node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(_lowercase )
__UpperCAmelCase : List[Any] = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(_lowercase )
__UpperCAmelCase : Optional[Any] = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(_lowercase )
return ret
def _a ( _lowercase : MyNode ):
'''simple docstring'''
print('''right rotation node:''' , node.get_data() )
__UpperCAmelCase : List[Any] = node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(_lowercase )
__UpperCAmelCase : Any = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(_lowercase )
__UpperCAmelCase : int = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(_lowercase )
return ret
def _a ( _lowercase : MyNode ):
'''simple docstring'''
__UpperCAmelCase : List[str] = node.get_left()
assert left_child is not None
node.set_left(left_rotation(_lowercase ) )
return right_rotation(_lowercase )
def _a ( _lowercase : MyNode ):
'''simple docstring'''
__UpperCAmelCase : Dict = node.get_right()
assert right_child is not None
node.set_right(right_rotation(_lowercase ) )
return left_rotation(_lowercase )
def _a ( _lowercase : MyNode | None , _lowercase : Any ):
'''simple docstring'''
if node is None:
return MyNode(_lowercase )
if data < node.get_data():
node.set_left(insert_node(node.get_left() , _lowercase ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an unbalance detected
__UpperCAmelCase : List[str] = node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
__UpperCAmelCase : Dict = right_rotation(_lowercase )
else:
__UpperCAmelCase : List[str] = lr_rotation(_lowercase )
else:
node.set_right(insert_node(node.get_right() , _lowercase ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
__UpperCAmelCase : List[str] = node.get_right()
assert right_child is not None
if data < right_child.get_data():
__UpperCAmelCase : List[Any] = rl_rotation(_lowercase )
else:
__UpperCAmelCase : Union[str, Any] = left_rotation(_lowercase )
__UpperCAmelCase : List[Any] = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(_lowercase )
return node
def _a ( _lowercase : MyNode ):
'''simple docstring'''
while True:
__UpperCAmelCase : List[Any] = root.get_right()
if right_child is None:
break
__UpperCAmelCase : List[Any] = right_child
return root.get_data()
def _a ( _lowercase : MyNode ):
'''simple docstring'''
while True:
__UpperCAmelCase : List[str] = root.get_left()
if left_child is None:
break
__UpperCAmelCase : str = left_child
return root.get_data()
def _a ( _lowercase : MyNode , _lowercase : Any ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = root.get_left()
__UpperCAmelCase : Any = root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
__UpperCAmelCase : List[str] = get_left_most(_lowercase )
root.set_data(_lowercase )
root.set_right(del_node(_lowercase , _lowercase ) )
elif left_child is not None:
__UpperCAmelCase : Any = left_child
elif right_child is not None:
__UpperCAmelCase : Optional[int] = right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print('''No such data''' )
return root
else:
root.set_left(del_node(_lowercase , _lowercase ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(_lowercase , _lowercase ) )
if get_height(_lowercase ) - get_height(_lowercase ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
__UpperCAmelCase : int = left_rotation(_lowercase )
else:
__UpperCAmelCase : Union[str, Any] = rl_rotation(_lowercase )
elif get_height(_lowercase ) - get_height(_lowercase ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
__UpperCAmelCase : Tuple = right_rotation(_lowercase )
else:
__UpperCAmelCase : List[Any] = lr_rotation(_lowercase )
__UpperCAmelCase : int = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
root.set_height(_lowercase )
return root
class a :
"""simple docstring"""
def __init__( self : Tuple ) -> None:
__UpperCAmelCase : MyNode | None = None
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
return get_height(self.root )
def lowerCamelCase__ ( self : Any , snake_case : Any ) -> None:
print('''insert:''' + str(snake_case ) )
__UpperCAmelCase : Optional[int] = insert_node(self.root , snake_case )
def lowerCamelCase__ ( self : Any , snake_case : Any ) -> None:
print('''delete:''' + str(snake_case ) )
if self.root is None:
print('''Tree is empty!''' )
return
__UpperCAmelCase : List[str] = del_node(self.root , snake_case )
def __str__( self : str , ) -> str: # a level traversale, gives a more intuitive look on the tree
__UpperCAmelCase : List[Any] = ''''''
__UpperCAmelCase : Tuple = MyQueue()
q.push(self.root )
__UpperCAmelCase : Any = self.get_height()
if layer == 0:
return output
__UpperCAmelCase : Optional[Any] = 0
while not q.is_empty():
__UpperCAmelCase : Any = q.pop()
__UpperCAmelCase : str = ''' ''' * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
                q.push(None )
                q.push(None )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
__UpperCAmelCase : Optional[int] = cnt + 1
for i in range(100 ):
if cnt == math.pow(2 , snake_case ) - 1:
__UpperCAmelCase : str = layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def _a ( ):
'''simple docstring'''
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
__UpperCAmelCase :List[Any] = AVLtree()
__UpperCAmelCase :int = list(range(1_0))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 266
|
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
__UpperCAmelCase :Optional[int] = TypeVar("KEY")
__UpperCAmelCase :Tuple = TypeVar("VAL")
@dataclass(frozen=_a , slots=_a )
class a ( Generic[KEY, VAL] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : KEY
SCREAMING_SNAKE_CASE : VAL
class a ( _Item ):
"""simple docstring"""
def __init__( self : Optional[int] ) -> None:
        super().__init__(None , None )
def __bool__( self : Dict ) -> bool:
return False
__UpperCAmelCase :Optional[Any] = _DeletedItem()
class a ( MutableMapping[KEY, VAL] ):
"""simple docstring"""
def __init__( self : str , snake_case : int = 8 , snake_case : float = 0.75 ) -> None:
__UpperCAmelCase : Dict = initial_block_size
__UpperCAmelCase : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
__UpperCAmelCase : str = capacity_factor
__UpperCAmelCase : Optional[int] = 0
def lowerCamelCase__ ( self : Optional[Any] , snake_case : KEY ) -> int:
return hash(snake_case ) % len(self._buckets )
def lowerCamelCase__ ( self : Union[str, Any] , snake_case : int ) -> int:
return (ind + 1) % len(self._buckets )
def lowerCamelCase__ ( self : List[str] , snake_case : int , snake_case : KEY , snake_case : VAL ) -> bool:
__UpperCAmelCase : int = self._buckets[ind]
if not stored:
__UpperCAmelCase : List[str] = _Item(snake_case , snake_case )
self._len += 1
return True
elif stored.key == key:
__UpperCAmelCase : int = _Item(snake_case , snake_case )
return True
else:
return False
def lowerCamelCase__ ( self : str ) -> bool:
__UpperCAmelCase : Union[str, Any] = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(snake_case )
def lowerCamelCase__ ( self : List[str] ) -> bool:
if len(self._buckets ) <= self._initial_block_size:
return False
__UpperCAmelCase : List[str] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def lowerCamelCase__ ( self : Optional[int] , snake_case : int ) -> None:
__UpperCAmelCase : Union[str, Any] = self._buckets
__UpperCAmelCase : Union[str, Any] = [None] * new_size
__UpperCAmelCase : List[str] = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def lowerCamelCase__ ( self : Optional[int] ) -> None:
self._resize(len(self._buckets ) * 2 )
def lowerCamelCase__ ( self : List[str] ) -> None:
self._resize(len(self._buckets ) // 2 )
def lowerCamelCase__ ( self : Union[str, Any] , snake_case : KEY ) -> Iterator[int]:
__UpperCAmelCase : Tuple = self._get_bucket_index(snake_case )
for _ in range(len(self._buckets ) ):
yield ind
__UpperCAmelCase : Dict = self._get_next_ind(snake_case )
def lowerCamelCase__ ( self : str , snake_case : KEY , snake_case : VAL ) -> None:
for ind in self._iterate_buckets(snake_case ):
if self._try_set(snake_case , snake_case , snake_case ):
break
def __setitem__( self : List[str] , snake_case : KEY , snake_case : VAL ) -> None:
if self._is_full():
self._size_up()
self._add_item(snake_case , snake_case )
def __delitem__( self : Any , snake_case : KEY ) -> None:
for ind in self._iterate_buckets(snake_case ):
__UpperCAmelCase : List[Any] = self._buckets[ind]
if item is None:
raise KeyError(snake_case )
if item is _deleted:
continue
if item.key == key:
__UpperCAmelCase : Optional[Any] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : List[str] , snake_case : KEY ) -> VAL:
for ind in self._iterate_buckets(snake_case ):
__UpperCAmelCase : Optional[int] = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(snake_case )
def __len__( self : List[Any] ) -> int:
return self._len
def __iter__( self : str ) -> Iterator[KEY]:
yield from (item.key for item in self._buckets if item)
def __repr__( self : Optional[Any] ) -> str:
__UpperCAmelCase : str = ''' ,'''.join(
f'{item.key}: {item.val}' for item in self._buckets if item )
return f'HashMap({val_string})'
| 266
| 1
|
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class lowercase_ (unittest.TestCase ):
def __init__( self , lowercase_ , lowercase_=13 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=99 , lowercase_=32 , lowercase_=5 , lowercase_=4 , lowercase_=37 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=512 , lowercase_=16 , lowercase_=2 , lowercase_=0.02 , lowercase_=4 , ) -> Dict:
a__ =parent
a__ =batch_size
a__ =seq_length
a__ =is_training
a__ =use_attention_mask
a__ =use_token_type_ids
a__ =use_labels
a__ =vocab_size
a__ =hidden_size
a__ =num_hidden_layers
a__ =num_attention_heads
a__ =intermediate_size
a__ =hidden_act
a__ =hidden_dropout_prob
a__ =attention_probs_dropout_prob
a__ =max_position_embeddings
a__ =type_vocab_size
a__ =type_sequence_label_size
a__ =initializer_range
a__ =num_choices
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a__ =None
if self.use_attention_mask:
a__ =random_attention_mask([self.batch_size, self.seq_length])
a__ =None
if self.use_token_type_ids:
a__ =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
a__ =RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __UpperCamelCase ( self) -> Tuple:
a__ =self.prepare_config_and_inputs()
a__ , a__ , a__ , a__ =config_and_inputs
a__ ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def __UpperCamelCase ( self) -> List[str]:
a__ =self.prepare_config_and_inputs()
a__ , a__ , a__ , a__ =config_and_inputs
a__ =True
a__ =floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
a__ =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class lowercase_ (lowercase__ , unittest.TestCase ):
snake_case =True
snake_case =(
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __UpperCamelCase ( self) -> List[str]:
a__ =FlaxRobertaModelTester(self)
@slow
def __UpperCamelCase ( self) -> List[Any]:
for model_class_name in self.all_model_classes:
a__ =model_class_name.from_pretrained('roberta-base' , from_pt=lowercase_)
a__ =model(np.ones((1, 1)))
self.assertIsNotNone(lowercase_)
| 20
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Tuple = {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class snake_case ( lowercase_ ):
"""simple docstring"""
_a = """roberta"""
def __init__( self, _lowercase=50265, _lowercase=768, _lowercase=12, _lowercase=12, _lowercase=3072, _lowercase="gelu", _lowercase=0.1, _lowercase=0.1, _lowercase=512, _lowercase=2, _lowercase=0.02, _lowercase=1E-12, _lowercase=1, _lowercase=0, _lowercase=2, _lowercase="absolute", _lowercase=True, _lowercase=None, **_lowercase, ) -> Dict:
super().__init__(pad_token_id=_lowercase, bos_token_id=_lowercase, eos_token_id=_lowercase, **_lowercase )
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = type_vocab_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = position_embedding_type
SCREAMING_SNAKE_CASE_ = use_cache
SCREAMING_SNAKE_CASE_ = classifier_dropout
class snake_case ( lowercase_ ):
"""simple docstring"""
@property
def a__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_ = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
SCREAMING_SNAKE_CASE_ = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
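
# Usage sketch (hedged): upstream this configuration class is `RobertaConfig`,
# and the defaults listed above mirror the `roberta-base` checkpoint:
#
#     from transformers import RobertaConfig
#     config = RobertaConfig()
#     print(config.hidden_size, config.num_hidden_layers)  # 768 12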
| 294
| 0
|
"""simple docstring"""
class EditDistance:
    """
    Solves the minimum edit (Levenshtein) distance with dynamic programming,
    both top-down with memoisation and bottom-up with a table.
    """

    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m, n):
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)

            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]

        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]

        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 518
|
"""simple docstring"""
def prefix_function(input_string: str) -> list:
    """Computes the Knuth-Morris-Pratt prefix (failure) function of the string."""
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]

        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j

    return prefix_result


def longest_prefix(input_string: str) -> int:
    """Returns the length of the longest proper prefix that is also a suffix."""
    return max(prefix_function(input_string))
if __name__ == "__main__":
import doctest
doctest.testmod()
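
    # Worked example (added sketch): for "aabaaab" the prefix function is
    # [0, 1, 0, 1, 2, 2, 3], so the longest proper prefix that is also a
    # suffix ("aab") has length 3.
    assert prefix_function("aabaaab") == [0, 1, 0, 1, 2, 2, 3]
    assert longest_prefix("aabaaab") == 3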
| 518
| 1
|
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    """Recursively checks whether `item` occurs in the sorted list `a_list`."""
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
| 493
|
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class UpperCAmelCase__ ( A__ , A__ ):
"""simple docstring"""
@register_to_config
def __init__( self : int , __lowerCamelCase : int = 128 , __lowerCamelCase : int = 256 , __lowerCamelCase : float = 2000.0 , __lowerCamelCase : int = 768 , __lowerCamelCase : int = 12 , __lowerCamelCase : int = 12 , __lowerCamelCase : int = 64 , __lowerCamelCase : int = 2048 , __lowerCamelCase : float = 0.1 , ) -> Dict:
super().__init__()
SCREAMING_SNAKE_CASE__ = nn.Sequential(
nn.Linear(__lowerCamelCase , d_model * 4 , bias=__lowerCamelCase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=__lowerCamelCase ) , nn.SiLU() , )
SCREAMING_SNAKE_CASE__ = nn.Embedding(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = nn.Dropout(p=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = nn.ModuleList()
for lyr_num in range(__lowerCamelCase ):
# FiLM conditional T5 decoder
SCREAMING_SNAKE_CASE__ = DecoderLayer(d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase )
self.decoders.append(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = TaLayerNorm(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = nn.Dropout(p=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
def lowercase_ ( self : Dict , __lowerCamelCase : str , __lowerCamelCase : int ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def lowercase_ ( self : int , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : Dict ) -> Any:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
SCREAMING_SNAKE_CASE__ = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
SCREAMING_SNAKE_CASE__ = self.conditioning_emb(__lowerCamelCase ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
SCREAMING_SNAKE_CASE__ = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
SCREAMING_SNAKE_CASE__ = torch.broadcast_to(
torch.arange(__lowerCamelCase , device=decoder_input_tokens.device ) , (batch, seq_length) , )
SCREAMING_SNAKE_CASE__ = self.position_encoding(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.continuous_inputs_projection(__lowerCamelCase )
inputs += position_encodings
SCREAMING_SNAKE_CASE__ = self.dropout(__lowerCamelCase )
# decoder: No padding present.
SCREAMING_SNAKE_CASE__ = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
SCREAMING_SNAKE_CASE__ = [(x, self.encoder_decoder_mask(__lowerCamelCase , __lowerCamelCase )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
SCREAMING_SNAKE_CASE__ = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
SCREAMING_SNAKE_CASE__ = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
SCREAMING_SNAKE_CASE__ = lyr(
__lowerCamelCase , conditioning_emb=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , )[0]
SCREAMING_SNAKE_CASE__ = self.decoder_norm(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.post_dropout(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.spec_out(__lowerCamelCase )
return spec_out
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Tuple , __lowerCamelCase : str , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any]=1e-6 ) -> List[Any]:
super().__init__()
SCREAMING_SNAKE_CASE__ = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , dropout_rate=__lowerCamelCase ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , dropout_rate=__lowerCamelCase , layer_norm_epsilon=__lowerCamelCase , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase , layer_norm_epsilon=__lowerCamelCase ) )
def lowercase_ ( self : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : Dict=None , __lowerCamelCase : Any=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : int=None , ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = self.layer[0](
__lowerCamelCase , conditioning_emb=__lowerCamelCase , attention_mask=__lowerCamelCase , )
if encoder_hidden_states is not None:
SCREAMING_SNAKE_CASE__ = torch.where(encoder_attention_mask > 0 , 0 , -1e10 ).to(
encoder_hidden_states.dtype )
SCREAMING_SNAKE_CASE__ = self.layer[1](
__lowerCamelCase , key_value_states=__lowerCamelCase , attention_mask=__lowerCamelCase , )
# Apply Film Conditional Feed Forward layer
SCREAMING_SNAKE_CASE__ = self.layer[-1](__lowerCamelCase , __lowerCamelCase )
return (hidden_states,)
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : str , __lowerCamelCase : List[str] ) -> Dict:
super().__init__()
SCREAMING_SNAKE_CASE__ = TaLayerNorm(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = TaFiLMLayer(in_features=d_model * 4 , out_features=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = Attention(query_dim=__lowerCamelCase , heads=__lowerCamelCase , dim_head=__lowerCamelCase , out_bias=__lowerCamelCase , scale_qk=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = nn.Dropout(__lowerCamelCase )
def lowercase_ ( self : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple=None , __lowerCamelCase : Union[str, Any]=None , ) -> Dict:
# pre_self_attention_layer_norm
SCREAMING_SNAKE_CASE__ = self.layer_norm(__lowerCamelCase )
if conditioning_emb is not None:
SCREAMING_SNAKE_CASE__ = self.FiLMLayer(__lowerCamelCase , __lowerCamelCase )
# Self-attention block
SCREAMING_SNAKE_CASE__ = self.attention(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = hidden_states + self.dropout(__lowerCamelCase )
return hidden_states
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : List[Any] ) -> int:
super().__init__()
SCREAMING_SNAKE_CASE__ = Attention(query_dim=__lowerCamelCase , heads=__lowerCamelCase , dim_head=__lowerCamelCase , out_bias=__lowerCamelCase , scale_qk=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = TaLayerNorm(__lowerCamelCase , eps=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = nn.Dropout(__lowerCamelCase )
def lowercase_ ( self : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Dict=None , __lowerCamelCase : Tuple=None , ) -> Tuple:
SCREAMING_SNAKE_CASE__ = self.layer_norm(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.attention(
__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , attention_mask=attention_mask.squeeze(1 ) , )
SCREAMING_SNAKE_CASE__ = hidden_states + self.dropout(__lowerCamelCase )
return layer_output
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] ) -> Tuple:
super().__init__()
SCREAMING_SNAKE_CASE__ = TaDenseGatedActDense(d_model=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = TaFiLMLayer(in_features=d_model * 4 , out_features=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = TaLayerNorm(__lowerCamelCase , eps=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = nn.Dropout(__lowerCamelCase )
def lowercase_ ( self : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any=None ) -> Dict:
SCREAMING_SNAKE_CASE__ = self.layer_norm(__lowerCamelCase )
if conditioning_emb is not None:
SCREAMING_SNAKE_CASE__ = self.film(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.DenseReluDense(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = hidden_states + self.dropout(__lowerCamelCase )
return hidden_states
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : int , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] ) -> List[Any]:
super().__init__()
SCREAMING_SNAKE_CASE__ = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = nn.Dropout(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = NewGELUActivation()
def lowercase_ ( self : List[Any] , __lowerCamelCase : Dict ) -> Dict:
SCREAMING_SNAKE_CASE__ = self.act(self.wi_a(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE__ = self.wi_a(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = hidden_gelu * hidden_linear
SCREAMING_SNAKE_CASE__ = self.dropout(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.wo(__lowerCamelCase )
return hidden_states
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : List[str]=1e-6 ) -> List[str]:
super().__init__()
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.ones(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE__ = eps
def lowercase_ ( self : Union[str, Any] , __lowerCamelCase : str ) -> List[Any]:
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
SCREAMING_SNAKE_CASE__ = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
SCREAMING_SNAKE_CASE__ = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def lowercase_ ( self : str , __lowerCamelCase : torch.Tensor ) -> torch.Tensor:
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044715 * torch.pow(__lowerCamelCase , 3.0 )) ))
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : List[str] ) -> Tuple:
super().__init__()
SCREAMING_SNAKE_CASE__ = nn.Linear(__lowerCamelCase , out_features * 2 , bias=__lowerCamelCase )
def lowercase_ ( self : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict ) -> str:
SCREAMING_SNAKE_CASE__ = self.scale_bias(__lowerCamelCase )
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = torch.chunk(__lowerCamelCase , 2 , -1 )
SCREAMING_SNAKE_CASE__ = x * (1 + scale) + shift
return x
| 493
| 1
|
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
def lowerCamelCase_ ( lowerCAmelCase: str )-> None:
# authorize twitter, initialize tweepy
_snake_case : Optional[Any] = tweepy.OAuthHandler(lowerCAmelCase , lowerCAmelCase )
auth.set_access_token(lowerCAmelCase , lowerCAmelCase )
_snake_case : List[Any] = tweepy.API(lowerCAmelCase )
# initialize a list to hold all the tweepy Tweets
_snake_case : Any = []
# make initial request for most recent tweets (200 is the maximum allowed count)
_snake_case : List[str] = api.user_timeline(screen_name=lowerCAmelCase , count=2_00 )
# save most recent tweets
alltweets.extend(lowerCAmelCase )
# save the id of the oldest tweet less one
_snake_case : List[Any] = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowerCAmelCase ) > 0:
print(F"""getting tweets before {oldest}""" )
# all subsequent requests use the max_id param to prevent duplicates
_snake_case : Tuple = api.user_timeline(
screen_name=lowerCAmelCase , count=2_00 , max_id=lowerCAmelCase )
# save most recent tweets
alltweets.extend(lowerCAmelCase )
# update the id of the oldest tweet less one
_snake_case : List[str] = alltweets[-1].id - 1
print(F"""...{len(lowerCAmelCase )} tweets downloaded so far""" )
# transform the tweepy tweets into a 2D array that will populate the csv
_snake_case : int = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"""new_{screen_name}_tweets.csv""" , 'w' ) as f:
_snake_case : Any = csv.writer(lowerCAmelCase )
writer.writerow(['id', 'created_at', 'text'] )
writer.writerows(lowerCAmelCase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 701
|
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
lowerCAmelCase_ = """http://www.mocksite.com/file1.txt"""
lowerCAmelCase_ = """\"text\": [\"foo\", \"foo\"]"""
lowerCAmelCase_ = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class _lowerCAmelCase :
'''simple docstring'''
a_ : int =200
a_ : List[str] ={"""Content-Length""": """100"""}
a_ : Tuple ={}
def UpperCamelCase_ ( self : Any , **UpperCamelCase : Any ):
'''simple docstring'''
return [bytes(UpperCamelCase , 'utf-8' )]
def lowerCamelCase_ ( *lowerCAmelCase: Tuple , **lowerCAmelCase: Tuple )-> str:
return MockResponse()
@pytest.mark.parametrize('urls_type' , [str, list, dict] )
def lowerCamelCase_ ( lowerCAmelCase: Union[str, Any] , lowerCAmelCase: Optional[Any] , lowerCAmelCase: Dict )-> Optional[Any]:
import requests
monkeypatch.setattr(lowerCAmelCase , 'request' , lowerCAmelCase )
_snake_case : List[str] = URL
if issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[int] = url
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Any = [url]
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = {'train': url}
_snake_case : int = 'dummy'
_snake_case : Optional[Any] = 'downloads'
_snake_case : Union[str, Any] = tmp_path
_snake_case : Dict = DownloadConfig(
cache_dir=os.path.join(lowerCAmelCase , lowerCAmelCase ) , use_etag=lowerCAmelCase , )
_snake_case : str = DownloadManager(dataset_name=lowerCAmelCase , download_config=lowerCAmelCase )
_snake_case : Optional[int] = dl_manager.download(lowerCAmelCase )
_snake_case : Tuple = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = [downloaded_paths]
_snake_case : List[str] = [urls]
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
assert "train" in downloaded_paths.keys()
_snake_case : Any = downloaded_paths.values()
_snake_case : List[str] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(lowerCAmelCase , lowerCAmelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
_snake_case : str = Path(lowerCAmelCase )
_snake_case : int = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
_snake_case : List[str] = downloaded_path.read_text()
assert content == CONTENT
_snake_case : Any = downloaded_path.with_suffix('.json' )
assert metadata_downloaded_path.exists()
_snake_case : Tuple = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('paths_type' , [str, list, dict] )
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: Optional[int] , lowerCAmelCase: Any )-> str:
_snake_case : str = str(lowerCAmelCase )
if issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : str = filename
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : List[Any] = [filename]
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = {'train': filename}
_snake_case : Any = 'dummy'
_snake_case : Union[str, Any] = xz_file.parent
_snake_case : int = 'extracted'
_snake_case : Union[str, Any] = DownloadConfig(
cache_dir=lowerCAmelCase , use_etag=lowerCAmelCase , )
_snake_case : List[str] = DownloadManager(dataset_name=lowerCAmelCase , download_config=lowerCAmelCase )
_snake_case : Dict = dl_manager.extract(lowerCAmelCase )
_snake_case : Optional[int] = paths
for extracted_paths in [extracted_paths]:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : List[str] = [extracted_paths]
_snake_case : int = [paths]
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
assert "train" in extracted_paths.keys()
_snake_case : Optional[int] = extracted_paths.values()
_snake_case : str = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(lowerCAmelCase , lowerCAmelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
_snake_case : List[str] = Path(lowerCAmelCase )
_snake_case : Optional[Any] = extracted_path.parts
assert parts[-1] == hash_url_to_filename(lowerCAmelCase , etag=lowerCAmelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
_snake_case : Optional[int] = extracted_path.read_text()
_snake_case : int = text_file.read_text()
assert extracted_file_content == expected_file_content
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: List[Any] )-> Dict:
assert path.endswith('.jsonl' )
for num_items, line in enumerate(lowerCAmelCase , start=1 ):
_snake_case : Dict = json.loads(line.decode('utf-8' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'] )
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: List[str] )-> Dict:
_snake_case : List[str] = request.getfixturevalue(lowerCAmelCase )
_snake_case : Optional[Any] = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
_test_jsonl(lowerCAmelCase , lowerCAmelCase )
assert num_jsonl == 2
@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2
def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
| 669
| 0
|
"""simple docstring"""
def solution(numerator: int = 1, digit: int = 1000) -> int:
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
            now_divide = now_divide * 10 % divide_by_number
    return the_digit
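# Worked example (editorial note, not part of the original solution): for
# solution(1, 10) the answer is 7, because 1/7 = 0.(142857) repeats with period
# 6, driven by the remainder cycle 1, 3, 2, 6, 4, 5 tracked in has_been_divided.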
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 661
|
"""simple docstring"""
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2_000_000) -> int:
    triangle_numbers: list[int] = [0]
    idx: int
    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)
    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int
    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]
        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil
    return area
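# Derivation sketch (editorial note): an a x b grid contains T(a) * T(b)
# rectangles, where T(n) = n * (n + 1) / 2 is the n-th triangle number. Fixing
# T(a) = triangle_a and solving b * (b + 1) / 2 = target / triangle_a with the
# quadratic formula gives b = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2,
# so only floor(b) and ceil(b) need testing, which is what the loop above does.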
if __name__ == "__main__":
print(F"""{solution() = }""")
| 661
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(self, backbone=None, num_channels=3, features_only=True, use_pretrained_backbone=True, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
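# Minimal usage sketch (editorial illustration; assumes the `timm` backend is
# installed and that this config is consumed by transformers' TimmBackbone):
#
#   config = TimmBackboneConfig(backbone="resnet50", out_indices=(2, 3, 4))
#   # a TimmBackbone built from this config exposes the feature maps of the
#   # requested stages for downstream detection / segmentation heads.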
| 712
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
"tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvBertForMaskedLM",
"ConvBertForMultipleChoice",
"ConvBertForQuestionAnswering",
"ConvBertForSequenceClassification",
"ConvBertForTokenClassification",
"ConvBertLayer",
"ConvBertModel",
"ConvBertPreTrainedModel",
"load_tf_weights_in_convbert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFConvBertForMaskedLM",
"TFConvBertForMultipleChoice",
"TFConvBertForQuestionAnswering",
"TFConvBertForSequenceClassification",
"TFConvBertForTokenClassification",
"TFConvBertLayer",
"TFConvBertModel",
"TFConvBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
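# Editorial note on the pattern above: replacing `sys.modules[__name__]` with a
# `_LazyModule` keeps importing this package cheap; the heavy `modeling_*`
# submodules are only imported on first attribute access, and the try/except
# blocks drop entries whose optional backends (tokenizers, torch, tf) are absent.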
| 555
| 0
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowercase__( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]], dtype=tf.int32, )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]], dtype=tf.float32, )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 97
|
'''simple docstring'''
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
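# Worked example (editorial note): minimum_squares_to_represent_a_number(12)
# returns 3, since 12 = 4 + 4 + 4; the DP fills answers[i] with
# 1 + min(answers[i - j * j]) over all squares j * j <= i.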
if __name__ == "__main__":
import doctest
doctest.testmod()
| 301
| 0
|
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
lowerCAmelCase__ = """__DUMMY_TRANSFORMERS_USER__"""
lowerCAmelCase__ = """Dummy User"""
lowerCAmelCase__ = """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"""
lowerCAmelCase__ = """https://hub-ci.huggingface.co"""
lowerCAmelCase__ = CI_HUB_ENDPOINT + """/datasets/{repo_id}/resolve/{revision}/{path}"""
lowerCAmelCase__ = CI_HUB_ENDPOINT + """/{repo_id}/resolve/{revision}/{filename}"""
lowerCAmelCase__ = Path("""~/.huggingface/hub_ci_token""").expanduser()
@pytest.fixture
def lowerCamelCase_ ( UpperCAmelCase_ : str ) -> Dict:
'''simple docstring'''
monkeypatch.setattr(
'huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE' , UpperCAmelCase_ )
@pytest.fixture
def lowerCamelCase_ ( UpperCAmelCase_ : Any ) -> Optional[Any]:
'''simple docstring'''
monkeypatch.setattr('datasets.config.HF_ENDPOINT' , UpperCAmelCase_ )
monkeypatch.setattr('datasets.config.HUB_DATASETS_URL' , UpperCAmelCase_ )
@pytest.fixture
def lowerCamelCase_ ( UpperCAmelCase_ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
monkeypatch.setattr('huggingface_hub.hf_api.HfFolder.path_token' , UpperCAmelCase_ )
@pytest.fixture
def lowerCamelCase_ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
HfFolder.save_token(UpperCAmelCase_ )
yield
HfFolder.delete_token()
@pytest.fixture(scope='session' )
def lowerCamelCase_ ( ) -> int:
'''simple docstring'''
return HfApi(endpoint=UpperCAmelCase_ )
@pytest.fixture(scope='session' )
def lowerCamelCase_ ( UpperCAmelCase_ : HfApi ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase : str = HfFolder.get_token()
HfFolder.save_token(UpperCAmelCase_ )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(UpperCAmelCase_ )
@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo
@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token, path_or_fileobj=str(text_file), path_in_repo="data/text_data.txt", repo_id=repo_id, repo_type="dataset", )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token, path_or_fileobj=str(zip_csv_with_dir_path), path_in_repo="data.zip", repo_id=repo_id, repo_type="dataset", )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token, path_or_fileobj=str(zip_image_path), path_in_repo="data.zip", repo_id=repo_id, repo_type="dataset", )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_img_data_
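# Illustrative usage sketch (editorial addition, not part of the original
# fixtures; the exact `load_dataset` keyword for auth varies across versions):
#
#   def test_load_private_txt_data(hf_private_dataset_repo_txt_data, hf_token):
#       ds = load_dataset(hf_private_dataset_repo_txt_data, token=hf_token)
#       assert "train" in ds
#
# Each session fixture yields the `{user}/{repo_name}` id and deletes the repo
# again at teardown.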
| 648
|
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
"""simple docstring"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_megatron_bert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fp16 = True
    # test_resize_embeddings = False
    test_head_masking = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst, dtype=torch.long, device=torch_device, )
TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
"""simple docstring"""
@slow
@unittest.skip('Model is not available.')
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
| 648
| 1
|
def hex_to_bin(hex_num: str) -> int:
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")
    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")
    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1
    return int(("-" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 322
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'uw-madison/mra-base-512-4': 'https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json',
}
class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-5, position_embedding_type="absolute", block_per_row=4, approx_mode="full", initial_prior_first_n_blocks=0, initial_prior_diagonal_n_blocks=0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
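# Minimal usage sketch (editorial illustration; MraModel ships in the same
# transformers package as this config):
#
#   config = MraConfig(block_per_row=4, approx_mode="full")
#   # MraModel(config) would build a randomly initialised MRA encoder using
#   # these block-sparse attention settings.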
| 322
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/nllb-200-distilled-600M': 1024,
}
# fmt: off
lowerCamelCase :Dict = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class NllbTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self: Any , __UpperCamelCase: Tuple , __UpperCamelCase: str="<s>" , __UpperCamelCase: Dict="</s>" , __UpperCamelCase: Union[str, Any]="</s>" , __UpperCamelCase: str="<s>" , __UpperCamelCase: List[str]="<unk>" , __UpperCamelCase: Union[str, Any]="<pad>" , __UpperCamelCase: List[Any]="<mask>" , __UpperCamelCase: Optional[Any]=None , __UpperCamelCase: Tuple=None , __UpperCamelCase: Optional[int]=None , __UpperCamelCase: Optional[Dict[str, Any]] = None , __UpperCamelCase: Optional[int]=None , __UpperCamelCase: Optional[int]=False , **__UpperCamelCase: int , ):
# Mask token behave like a normal word, i.e. include the space before it
_a = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else mask_token
_a = {} if sp_model_kwargs is None else sp_model_kwargs
_a = legacy_behaviour
super().__init__(
bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , tokenizer_file=__UpperCamelCase , src_lang=__UpperCamelCase , tgt_lang=__UpperCamelCase , additional_special_tokens=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=__UpperCamelCase , **__UpperCamelCase , )
_a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCamelCase ) )
_a = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
_a = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_a = 1
_a = len(self.sp_model )
_a = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__UpperCamelCase )
}
_a = {v: k for k, v in self.lang_code_to_id.items()}
_a = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
_a = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_a = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
_a = src_lang if src_lang is not None else '''eng_Latn'''
_a = self.lang_code_to_id[self._src_lang]
_a = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
@property
def _A ( self: Dict ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _A ( self: Optional[int] ):
return self._src_lang
@src_lang.setter
def _A ( self: int , __UpperCamelCase: str ):
_a = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _A ( self: Any , __UpperCamelCase: List[int] , __UpperCamelCase: Optional[List[int]] = None , __UpperCamelCase: bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
_a = [1] * len(self.prefix_tokens )
_a = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__UpperCamelCase )) + suffix_ones
return prefix_ones + ([0] * len(__UpperCamelCase )) + ([0] * len(__UpperCamelCase )) + suffix_ones
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def _A ( self: Tuple , __UpperCamelCase: Any , __UpperCamelCase: str , __UpperCamelCase: Optional[str] , __UpperCamelCase: Optional[str] , **__UpperCamelCase: Union[str, Any] ):
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
_a = src_lang
_a = self(__UpperCamelCase , add_special_tokens=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase )
_a = self.convert_tokens_to_ids(__UpperCamelCase )
_a = tgt_lang_id
return inputs
def _A ( self: Dict ):
_a = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
    def _tokenize(self, text: str):
        return self.sp_model.encode(text, out_type=str)
def _A ( self: List[str] , __UpperCamelCase: str ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_a = self.sp_model.PieceToId(__UpperCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _A ( self: Optional[Any] , __UpperCamelCase: Optional[int] ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
def _A ( self: List[str] , __UpperCamelCase: str , __UpperCamelCase: Optional[str] = None ):
if not os.path.isdir(__UpperCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_a = os.path.join(
__UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCamelCase , '''wb''' ) as fi:
_a = self.sp_model.serialized_model_proto()
fi.write(__UpperCamelCase )
return (out_vocab_file,)
def _A ( self: Optional[int] , __UpperCamelCase: List[str] , __UpperCamelCase: str = "eng_Latn" , __UpperCamelCase: Optional[List[str]] = None , __UpperCamelCase: str = "fra_Latn" , **__UpperCamelCase: Tuple , ):
_a = src_lang
_a = tgt_lang
return super().prepare_seqaseq_batch(__UpperCamelCase , __UpperCamelCase , **__UpperCamelCase )
def _A ( self: Any ):
return self.set_src_lang_special_tokens(self.src_lang )
def _A ( self: Union[str, Any] ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens(self, src_lang) -> None:
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
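# Illustrative usage sketch (editorial addition; in the released library this
# class is exposed as transformers.NllbTokenizer):
#
#   tokenizer = NllbTokenizer.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#   )
#   inputs = tokenizer("Hello world")
#   # with legacy_behaviour=False the ids start with the src_lang code token
#   # and end with </s>, matching set_src_lang_special_tokens above.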
| 346
|
def twos_complement(number: int) -> str:
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 346
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
A : Optional[Any] = TypeVar('T')
A : List[Any] = TypeVar('U')
class DoubleLinkedListNode(Generic[T, U]):
    """Double linked list node built specifically for LRU Cache."""

    def __init__(self, key: T | None, val: U | None) -> None:
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    # class variable to map the decorated functions to their respective instance
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int) -> None:
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
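# Illustrative usage sketch (editorial addition, relies only on the classes above):
#
#   @LRUCache.decorator(100)
#   def fib(num: int) -> int:
#       return num if num < 2 else fib(num - 1) + fib(num - 2)
#
#   fib(30)
#   print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, ...)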
| 516
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A : Any = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Replaces the key by subtracting the offset from the original layer number."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
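# Example of the rename above (editorial note): with offset=1,
# original_name="mlp.fc1" and new_name="output.conv1", the key
# "poolformer.encoder.2.3.mlp.fc1.weight" becomes
# "poolformer.encoder.block.1.3.output.conv1.weight".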
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def snake_case__ ( _snake_case : Any , _snake_case : List[str] , _snake_case : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = PoolFormerConfig()
# set attributes based on model_name
UpperCamelCase__ = "huggingface/label-files"
UpperCamelCase__ = model_name[-3:]
UpperCamelCase__ = 10_00
UpperCamelCase__ = "imagenet-1k-id2label.json"
UpperCamelCase__ = (1, 10_00)
# set config attributes
UpperCamelCase__ = json.load(open(hf_hub_download(_snake_case , _snake_case , repo_type="dataset" ) , "r" ) )
UpperCamelCase__ = {int(_snake_case ): v for k, v in idalabel.items()}
UpperCamelCase__ = idalabel
UpperCamelCase__ = {v: k for k, v in idalabel.items()}
if size == "s12":
UpperCamelCase__ = [2, 2, 6, 2]
UpperCamelCase__ = [64, 1_28, 3_20, 5_12]
UpperCamelCase__ = 4.0
UpperCamelCase__ = 0.9
elif size == "s24":
UpperCamelCase__ = [4, 4, 12, 4]
UpperCamelCase__ = [64, 1_28, 3_20, 5_12]
UpperCamelCase__ = 4.0
UpperCamelCase__ = 0.9
elif size == "s36":
UpperCamelCase__ = [6, 6, 18, 6]
UpperCamelCase__ = [64, 1_28, 3_20, 5_12]
UpperCamelCase__ = 4.0
UpperCamelCase__ = 1E-6
UpperCamelCase__ = 0.9
elif size == "m36":
UpperCamelCase__ = [6, 6, 18, 6]
UpperCamelCase__ = [96, 1_92, 3_84, 7_68]
UpperCamelCase__ = 4.0
UpperCamelCase__ = 1E-6
UpperCamelCase__ = 0.95
elif size == "m48":
UpperCamelCase__ = [8, 8, 24, 8]
UpperCamelCase__ = [96, 1_92, 3_84, 7_68]
UpperCamelCase__ = 4.0
UpperCamelCase__ = 1E-6
UpperCamelCase__ = 0.95
else:
raise ValueError(F'Size {size} not supported' )
# load image processor
UpperCamelCase__ = PoolFormerImageProcessor(crop_pct=_snake_case )
# Prepare image
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=_snake_case , return_tensors="pt" ).pixel_values
logger.info(F'Converting model {model_name}...' )
# load original state dict
UpperCamelCase__ = torch.load(_snake_case , map_location=torch.device("cpu" ) )
# rename keys
UpperCamelCase__ = rename_keys(_snake_case )
# create HuggingFace model and load state dict
UpperCamelCase__ = PoolFormerForImageClassification(_snake_case )
model.load_state_dict(_snake_case )
model.eval()
# Define image processor
UpperCamelCase__ = PoolFormerImageProcessor(crop_pct=_snake_case )
UpperCamelCase__ = image_processor(images=prepare_img() , return_tensors="pt" ).pixel_values
# forward pass
UpperCamelCase__ = model(_snake_case )
UpperCamelCase__ = outputs.logits
# define expected logit slices for different models
if size == "s12":
UpperCamelCase__ = torch.tensor([-0.3045, -0.6758, -0.4869] )
elif size == "s24":
UpperCamelCase__ = torch.tensor([0.4402, -0.1374, -0.8045] )
elif size == "s36":
UpperCamelCase__ = torch.tensor([-0.6080, -0.5133, -0.5898] )
elif size == "m36":
UpperCamelCase__ = torch.tensor([0.3952, 0.2263, -1.2668] )
elif size == "m48":
UpperCamelCase__ = torch.tensor([0.1167, -0.0656, -0.3423] )
else:
raise ValueError(F'Size {size} not supported' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , _snake_case , atol=1E-2 )
# finally, save model and image processor
logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(_snake_case ).mkdir(exist_ok=_snake_case )
model.save_pretrained(_snake_case )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(_snake_case )
if __name__ == "__main__":
A : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
A : List[str] = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 516
| 1
|
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer
class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id)
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 4
|
'''Shannon entropy of a text, computed over single characters and character pairs.'''

from __future__ import annotations

import math
from collections import Counter
from string import ascii_lowercase


def calculate_prob(text: str) -> None:
    """
    Print the first-order entropy, the second-order entropy, and the
    difference between them, each rounded and printed in bits.
    """
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")
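

# Illustration: for a text in which the four symbols " ", "a", "b", "c" occur
# equally often, each has probability 1/4, so the first-order entropy is
# -4 * (1/4) * log2(1/4) = 2.0 bits per character.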


def analyze_text(text: str) -> tuple[dict, dict]:
    """
    Convert the given text into two dicts of character counts:
    one for single characters and one for two-character sequences.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main() -> None:
    import doctest

    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)


if __name__ == "__main__":
    main()
"""simple docstring"""
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
lowerCAmelCase: int ="https://www.google.com/search?q=" + " ".join(sys.argv[1:])
lowerCAmelCase: Optional[Any] =requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(10000):
out_file.write(data)
lowerCAmelCase: int =BeautifulSoup(res.text, "html.parser")
lowerCAmelCase: int =list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(F'https://google.com{link.get("href")}')
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class MvpTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab = dict(zip(vocab_tokens, range(len(vocab_tokens))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame

        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            tubelet_size=self.tubelet_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()

        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)

        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="VideoMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)


@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_for_pretraining(self):
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short").to(torch_device)

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device
        )
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short", norm_pix_loss=False).to(
            torch_device
        )

        with torch.no_grad():
            outputs = model(**inputs)

        expected_loss = torch.tensor([0.6469], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }


@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    """
    Estimate pi by dart-throwing: sample points uniformly in the 2x2 square
    centred at the origin and count the fraction landing inside the unit circle.
    """

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The math.pi value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """
    Monte Carlo estimate of the integral of `function_to_integrate` over
    [min_value, max_value]: average the function at uniformly sampled points
    and scale by the interval length.
    """
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    """
    Check the estimator on f(x) = x, whose integral over [min_value, max_value]
    is (max_value^2 - min_value^2) / 2.
    """

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """
    Estimate pi as the area under f(x) = sqrt(4 - x^2) on [0, 2],
    which is a quarter circle of radius 2 (area pi).
    """

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
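
    # Illustrative demo runs (the sample sizes below are arbitrary choices,
    # not from the original file):
    pi_estimator(100_000)
    area_under_line_estimator_check(100_000)
    pi_estimator_using_area_under_curve(100_000)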
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        # the kwargs below mirror BarkProcessor's internal tokenizer call
        # (values assumed from the upstream transformers test)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
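

# Usage sketch (illustrative; the checkpoint and preset are the ones exercised
# by the tests above):
#
#     processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
#     inputs = processor(text="This is a test string", voice_preset="en_speaker_1")
#     # `inputs` holds the tokenized text plus the "history_prompt" speaker embeddings.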
def merge_sort(collection: list) -> list:
    """Pure implementation of the merge sort algorithm in Python.

    >>> merge_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> merge_sort([])
    []
    """

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into a single sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
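

# Note: this top-down merge sort runs in O(n log n) comparisons, but `pop(0)`
# inside `_merge` is O(k) on Python lists, so an index-based merge would be
# faster for large inputs. Example:
#     merge_sort([5, 3, 1, 4, 2])  ->  [1, 2, 3, 4, 5]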
if __name__ == "__main__":
import doctest
doctest.testmod()
_A = input('Enter numbers separated by a comma:\n').strip()
_A = [int(item) for item in user_input.split(',')]
print(*merge_sort(unsorted), sep=',')
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """
    Solve the system a1*x + b1*y = c1, a2*x + b2*y = c2, where each equation is
    given as [a, b, c], using Cramer's rule.
    """
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
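

if __name__ == "__main__":
    # Minimal usage sketch: x + 2y = 3 and 2x + y = 3 have the unique solution x = y = 1.
    print(cramers_rule_2x2([1, 2, 3], [2, 1, 3]))  # -> (1.0, 1.0)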
'''UMT5 model configuration.'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}


class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
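

# Usage sketch (illustrative; not part of the original module):
#
#     config = UMT5Config(feed_forward_proj="gated-gelu")
#     assert config.is_gated_act and config.dense_act_fn == "gelu_new"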
from __future__ import annotations
from typing import Any


class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Add an edge as [first node, second node, edge weight]."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Return the representative of the component containing u_node."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Re-point every node in the map at its component's representative."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Merge two components, attaching the smaller one to the larger one."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Perform Borůvka's algorithm to find a minimum spanning tree."""
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]

                if u_component != v_component:
                    # Keep the cheapest edge leaving each of the two components.
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]

                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes

        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


def test_vector() -> None:
    """Placeholder kept from the original module; see the usage sketch above."""
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)


def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Map each choice's string representation back to the actual value."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)


class HfArgumentParser(ArgumentParser):
    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)

    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)

    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)

    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)

    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
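

# Usage sketch (illustrative; the dataclass below is hypothetical):
#
#     @dataclasses.dataclass
#     class TrainArgs:
#         learning_rate: float = 1e-4
#         use_fp16: bool = False
#
#     parser = HfArgumentParser(TrainArgs)
#     (train_args,) = parser.parse_args_into_dataclasses(["--learning_rate", "3e-4"])
#     assert train_args.learning_rate == 3e-4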
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel
if is_vision_available():
    from transformers import Mask2FormerImageProcessor
if is_vision_available():
from PIL import Image
class Mask2FormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels

        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels

        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = Mask2FormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_mask2former_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = Mask2FormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = Mask2FormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mask2former_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs_dict, output_hidden_states=False)

    def test_mask2former_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = Mask2FormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()

        model = Mask2FormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def test_inference_no_head(self):
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )
    def test_inference_universal_segmentation_head(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_with_segmentation_maps_and_loss(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
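
    # End-to-end sketch (editor's illustration, hedged): raw outputs can be converted into a
    # semantic map with the image processor's post-processing helper; target size is the
    # original (height, width) of the input image.
    #
    #     seg_map = image_processor.post_process_semantic_segmentation(
    #         outputs, target_sizes=[image.size[::-1]]
    #     )[0]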
| 430
| 1
|
from itertools import product
from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g
def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst
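
# Editor's note (hedged): the kernel above keeps the upstream normalization 1 / (2 * pi * sigma)
# rather than the textbook 1 / (2 * pi * sigma**2), so absolute intensities are only approximate;
# relative smoothing is unaffected. A quick shape check for the valid-convolution output:
#
#     out = gaussian_filter(zeros((100, 100)), 3, sigma=1)
#     assert out.shape == (98, 98)  # (height - k_size + 1, width - k_size + 1)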
if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
| 652
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
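
# Hedged usage sketch (not part of this __init__): Kandinsky is a two-stage system in which the
# prior maps text to CLIP image embeddings and the decoder renders them. The checkpoint names
# below are the community hub ids this layout assumes.
#
#     prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
#     pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
#     image_embeds, negative_image_embeds = prior("a red cat").to_tuple()
#     image = pipe(
#         "a red cat", image_embeds=image_embeds, negative_image_embeds=negative_image_embeds
#     ).images[0]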
| 652
| 1
|
"""simple docstring"""
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences
    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)['content'])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)['input_ids']
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
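
    # Buffer sizing in rough numbers: with chars_per_token ~= 3.6, a character budget of
    # seq_length * chars_per_token * num_of_sequences yields on the order of num_of_sequences
    # packed sequences of exactly seq_length tokens per tokenizer call.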
def create_dataloader(args):
    ds_kwargs = {'streaming': True}
    train_data = load_dataset(args.dataset_name, split='train', **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, train_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader
def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))

        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float('inf')
    return loss.item(), perplexity.item()
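
# Perplexity is the exponential of the mean token-level cross-entropy: ppl = exp(mean(loss)).
# As a sanity reference, a model that guesses uniformly over a V-token vocabulary has ppl = V.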
# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
| 696
|
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_28,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
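
    # Round-trip sketch (editor's illustration, not an original test): save_pretrained writes a
    # config.json that from_pretrained restores unchanged.
    #
    #     cfg = BertConfig(hidden_size=32)
    #     with tempfile.TemporaryDirectory() as tmp:
    #         cfg.save_pretrained(tmp)
    #         assert BertConfig.from_pretrained(tmp).hidden_size == 32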
class ConfigTestUtils(unittest.TestCase):
    def test_config_update_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")
    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )
    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )
    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)
    def test_repo_versioning_before(self):
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
| 696
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        self.adlist.append({"value": "", "next_states": [], "fail_state": 0, "output": []})

        for keyword in keywords:
            self.add_keyword(keyword)

        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        """
        >>> A = Automaton(["what", "hat", "ver", "er"])
        >>> A.search_in("whatever, err ... , wherever")
        {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}
        """
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
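
# Worked usage (editor's sketch): the automaton is built once and reused across texts, so matching
# runs in roughly O(len(text) + number_of_matches) regardless of how many keywords were loaded.
#
#     auto = Automaton(["he", "she", "his", "hers"])
#     auto.search_in("ushers")
#     # -> {'she': [1], 'he': [2], 'hers': [2]}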
| 75
|
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ['XLA_PYTHON_CLIENT_ALLOCATOR'] = 'platform'
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config, input_ids, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
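
# Shape sketch (editor's note): for a batch of B sequences of length T, the helper above produces
# attention masks of shape (B, T) and head masks of shape (num_layers, num_heads); with the tester
# defaults below (batch_size=13, seq_length=7) the masks come out as (13, 7).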
class FlaxBlenderbotModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict['input_ids'])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype='i4')

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype='i4')
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids, )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"""Max diff is {diff}""")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict['input_ids'])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ], axis=-1, )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype='i4')
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids, )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"""Max diff is {diff}""")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ], dtype=np.int64, )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs['logits'].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs['logits'].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest('JIT Enabled'):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest('JIT Disabled'):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict['input_ids'], inputs_dict['attention_mask'])

                prepared_inputs_dict = {
                    'decoder_input_ids': inputs_dict['decoder_input_ids'],
                    'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
                    'encoder_outputs': encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, )

                with self.subTest('JIT Enabled'):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest('JIT Disabled'):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('facebook/blenderbot-400M-distill')
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != 'cpu', '3B test too slow on CPU.')
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {'num_beams': 1, 'early_stopping': True, 'min_length': 15, 'max_length': 25}
        TOK_DECODE_KW = {'skip_special_tokens': True, 'clean_up_tokenization_spaces': True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B', from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B')

        src_text = ['Sam']
        model_inputs = tokenizer(src_text, return_tensors='jax')

        generated_utterances = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_utterances.sequences, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
| 75
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00_085, beta_end=0.012, beta_schedule='scaled_linear', num_train_timesteps=1000, clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'An astronaut riding an elephant',
            'source_prompt': 'An astronaut riding a horse',
            'image': image,
            'generator': generator,
            'num_inference_steps': 2,
            'eta': 0.1,
            'strength': 0.8,
            'guidance_scale': 3,
            'source_guidance_scale': 1,
            'output_type': 'numpy',
        }
        return inputs
    def test_stable_diffusion_cycle(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(torch_device != 'cuda', 'This test requires a GPU')
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, 'half'):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip('non-deterministic pipeline')
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/cycle-diffusion/black_colored_car.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy')
        init_image = init_image.resize((512, 512))

        model_id = 'CompVis/stable-diffusion-v1-4'
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder='scheduler')
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision='fp16')

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = 'A black colored car'
        prompt = 'A blue colored car'

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type='np', )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1
    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/cycle-diffusion/black_colored_car.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy')
        init_image = init_image.resize((512, 512))

        model_id = 'CompVis/stable-diffusion-v1-4'
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder='scheduler')
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = 'A black colored car'
        prompt = 'A blue colored car'

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type='np', )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
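
# Usage sketch (editor's illustration): CycleDiffusion edits a real image by DDIM-inverting it
# under `source_prompt` and re-generating under `prompt`; `strength` controls how much source
# structure is preserved. The call mirrors the integration tests above:
#
#     output = pipe(
#         prompt='A blue colored car', source_prompt='A black colored car',
#         image=init_image, strength=0.85, guidance_scale=3, source_guidance_scale=1,
#     )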
| 720
|
'''simple docstring'''
import os
def solution():
    with open(os.path.dirname(__file__) + '/grid.txt') as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])
__a = 0
# right
for i in range(20 ):
for j in range(17 ):
__a = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
__a = temp
# down
for i in range(17 ):
for j in range(20 ):
__a = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
__a = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
__a = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
__a = temp
# diagonal 2
for i in range(17 ):
for j in range(3 , 20 ):
__a = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
__a = temp
return maximum
if __name__ == "__main__":
print(solution())
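
# Editor's note: the four scans (right, down, and the two diagonals) cover every straight line of
# four adjacent cells; left/up products duplicate right/down ones, so they need not be scanned.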
| 490
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
},
'''tokenizer_file''': {
'''google/bigbird-roberta-base''': (
'''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
),
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/bigbird-roberta-base''': 40_96,
'''google/bigbird-roberta-large''': 40_96,
'''google/bigbird-base-trivia-itc''': 40_96,
}
SPIECE_UNDERLINE = '''▁'''
class lowerCAmelCase_ ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__( self ,vocab_file=None ,tokenizer_file=None ,unk_token="<unk>" ,bos_token="<s>" ,eos_token="</s>" ,pad_token="<pad>" ,sep_token="[SEP]" ,mask_token="[MASK]" ,cls_token="[CLS]" ,**kwargs ,):
        bos_token = AddedToken(bos_token ,lstrip=False ,rstrip=False ) if isinstance(bos_token ,str ) else bos_token
        eos_token = AddedToken(eos_token ,lstrip=False ,rstrip=False ) if isinstance(eos_token ,str ) else eos_token
        unk_token = AddedToken(unk_token ,lstrip=False ,rstrip=False ) if isinstance(unk_token ,str ) else unk_token
        pad_token = AddedToken(pad_token ,lstrip=False ,rstrip=False ) if isinstance(pad_token ,str ) else pad_token
        cls_token = AddedToken(cls_token ,lstrip=False ,rstrip=False ) if isinstance(cls_token ,str ) else cls_token
        sep_token = AddedToken(sep_token ,lstrip=False ,rstrip=False ) if isinstance(sep_token ,str ) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token ,lstrip=True ,rstrip=False ) if isinstance(mask_token ,str ) else mask_token
        super().__init__(
            vocab_file ,tokenizer_file=tokenizer_file ,bos_token=bos_token ,eos_token=eos_token ,unk_token=unk_token ,sep_token=sep_token ,pad_token=pad_token ,cls_token=cls_token ,mask_token=mask_token ,**kwargs ,)
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self ,token_ids_0 ,token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self ,token_ids_0 ,token_ids_1 = None ,already_has_special_tokens = False ):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self ,token_ids_0 ,token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self ,save_directory ,filename_prefix = None ):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file ,out_vocab_file )
        return (out_vocab_file,)
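# Added illustration (token ids are hypothetical): the three methods above
# implement BERT-style packing, [CLS] A [SEP] for one sequence and
# [CLS] A [SEP] B [SEP] for a pair.
cls_id, sep_id = 65, 66
seq_a, seq_b = [10, 11], [20]
print([cls_id] + seq_a + [sep_id])                     # [65, 10, 11, 66]
print([cls_id] + seq_a + [sep_id] + seq_b + [sep_id])  # [65, 10, 11, 66, 20, 66]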
| 105
|
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
_CITATION = '\\n@inproceedings{banarjee2005,\n  title     = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n  author    = {Banerjee, Satanjeev  and Lavie, Alon},\n  booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n  month     = jun,\n  year      = {2005},\n  address   = {Ann Arbor, Michigan},\n  publisher = {Association for Computational Linguistics},\n  url       = {https://www.aclweb.org/anthology/W05-0909},\n  pages     = {65--72},\n}\n'
_DESCRIPTION = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
_KWARGS_DESCRIPTION = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    \'meteor\': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric(\'meteor\')\n    >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n    >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results["meteor"], 4))\n    0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence"),
"references": datasets.Value("string" , id="sequence"),
}) , codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"] , reference_urls=[
"https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
"https://en.wikipedia.org/wiki/METEOR",
] , )
    def _download_and_prepare( self , dl_manager ):
import nltk
nltk.download("wordnet")
if NLTK_VERSION >= version.Version("3.6.5"):
nltk.download("punkt")
if NLTK_VERSION >= version.Version("3.6.6"):
nltk.download("omw-1.4")
    def _compute( self , predictions , references , alpha=0.9 , beta=3 , gamma=0.5 ):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref) , word_tokenize(pred) , alpha=alpha , beta=beta , gamma=gamma)
                for ref, pred in zip(references , predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref , pred , alpha=alpha , beta=beta , gamma=gamma)
                for ref, pred in zip(references , predictions)
            ]
        return {"meteor": np.mean(scores)}
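# Added usage sketch (requires the nltk 'wordnet' and 'punkt' data): this is the
# per-pair call the _compute method above makes on recent nltk versions.
from nltk import word_tokenize
from nltk.translate import meteor_score

pred = "the cat sat on the mat"
ref = "a cat was sitting on the mat"
print(meteor_score.single_meteor_score(
    word_tokenize(ref), word_tokenize(pred), alpha=0.9, beta=3, gamma=0.5))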
| 226
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
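# Added sketch of the lazy-import pattern used above (simplified; the real
# transformers._LazyModule also handles dir(), __spec__ and error reporting):
import importlib

class LazyModule:
    def __init__(self, name, import_structure):
        self._name = name
        # map each public attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._attr_to_module[attr], self._name)
        return getattr(module, attr)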
| 345
|
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list ) -> int:
    if not postfix_notation:
        return 0
    operations = {'+', '-', '*', '/'}
    stack = []
    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b )
            elif token == "-":
                stack.append(a - b )
            elif token == "*":
                stack.append(a * b )
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1 )
                else:
                    stack.append(a // b )
        else:
            stack.append(int(token ) )
    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
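# Usage sketch (added): "(2 + 3) * 4" is "2 3 + 4 *" in postfix notation.
print(evaluate_postfix(["2", "3", "+", "4", "*"]))  # 20
print(evaluate_postfix(["-7", "2", "/"]))           # -3: the a // b + 1 branch truncates toward zero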
| 345
| 1
|
"""simple docstring"""
import numpy as np
def exponential_linear_unit( vector , alpha ):
    return np.where(vector > 0 , vector , (alpha * (np.exp(vector ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod()
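# Quick numeric check (added): positives pass through the ELU unchanged,
# negatives saturate toward -alpha.
print(exponential_linear_unit(np.array([-2.0, 0.0, 2.0]), alpha=1.0))  # ~[-0.8647  0.  2.]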
| 76
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence , [] , 0)
def create_state_space_tree(sequence: list[Any] , current_subsequence: list[Any] , index: int) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return
    create_state_space_tree(sequence , current_subsequence , index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence , current_subsequence , index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['''A''', '''B''', '''C'''])
generate_all_subsequences(seq)
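# Added example: for a two-element sequence the exclude-then-include recursion
# above prints all 2**n subsequences in this order:
# [], ['B'], ['A'], ['A', 'B']
generate_all_subsequences(["A", "B"])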
| 274
| 0
|
import itertools
import math
def is_prime(number: int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All prime numbers are in the format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator():
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution(nth: int = 10_001 ) -> int:
    return next(itertools.islice(prime_generator() , nth - 1 , None ) )
if __name__ == "__main__":
print(f'{solution() = }')
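# Added usage sketch: the generator also answers smaller queries directly.
print(list(itertools.islice(prime_generator(), 5)))  # [2, 3, 5, 7, 11]
print(solution(6))  # 13, the sixth prime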
| 708
|
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
"compression_format, is_archive" ,[
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
] ,)
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,) -> List[Any]:
lowercase__ : List[str] = {
"7z": (seven_zip_file, SevenZipExtractor),
"bz2": (bza_file, BzipaExtractor),
"gzip": (gz_file, GzipExtractor),
"lz4": (lza_file, LzaExtractor),
"tar": (tar_file, TarExtractor),
"xz": (xz_file, XzExtractor),
"zip": (zip_file, ZipExtractor),
"zstd": (zstd_file, ZstdExtractor),
}
lowercase__ , lowercase__ : Tuple = input_paths_and_base_extractors[compression_format]
if input_path is None:
lowercase__ : List[Any] = F"""for '{compression_format}' compression_format, """
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(SCREAMING_SNAKE_CASE_ )
assert base_extractor.is_extractable(SCREAMING_SNAKE_CASE_ )
lowercase__ : str = tmp_path / ("extracted" if is_archive else "extracted.txt")
base_extractor.extract(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
lowercase__ : Tuple = file_path.read_text(encoding="utf-8" )
else:
lowercase__ : List[str] = output_path.read_text(encoding="utf-8" )
lowercase__ : Dict = text_file.read_text(encoding="utf-8" )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"compression_format, is_archive" ,[
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
] ,)
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,) -> Optional[Any]:
lowercase__ : Optional[Any] = {
"7z": seven_zip_file,
"bz2": bza_file,
"gzip": gz_file,
"lz4": lza_file,
"tar": tar_file,
"xz": xz_file,
"zip": zip_file,
"zstd": zstd_file,
}
lowercase__ : List[Any] = input_paths[compression_format]
if input_path is None:
lowercase__ : Union[str, Any] = F"""for '{compression_format}' compression_format, """
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(SCREAMING_SNAKE_CASE_ )
lowercase__ : List[Any] = Extractor.infer_extractor_format(SCREAMING_SNAKE_CASE_ )
assert extractor_format is not None
lowercase__ : Optional[int] = tmp_path / ("extracted" if is_archive else "extracted.txt")
Extractor.extract(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
lowercase__ : Tuple = file_path.read_text(encoding="utf-8" )
else:
lowercase__ : Dict = output_path.read_text(encoding="utf-8" )
lowercase__ : Optional[Any] = text_file.read_text(encoding="utf-8" )
assert extracted_file_content == expected_file_content
@pytest.fixture
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> int:
import tarfile
lowercase__ : Any = tmp_path / "data_dot_dot"
directory.mkdir()
lowercase__ : Union[str, Any] = directory / "tar_file_with_dot_dot.tar"
with tarfile.TarFile(SCREAMING_SNAKE_CASE_ ,"w" ) as f:
f.add(SCREAMING_SNAKE_CASE_ ,arcname=os.path.join(".." ,text_file.name ) )
return path
@pytest.fixture
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> int:
import tarfile
lowercase__ : List[str] = tmp_path / "data_sym_link"
directory.mkdir()
lowercase__ : Tuple = directory / "tar_file_with_sym_link.tar"
os.symlink(".." ,directory / "subdir" ,target_is_directory=SCREAMING_SNAKE_CASE_ )
with tarfile.TarFile(SCREAMING_SNAKE_CASE_ ,"w" ) as f:
f.add(str(directory / "subdir" ) ,arcname="subdir" ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
"insecure_tar_file, error_log" ,[("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")] ,)
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> Tuple:
lowercase__ : Dict = {
"tar_file_with_dot_dot": tar_file_with_dot_dot,
"tar_file_with_sym_link": tar_file_with_sym_link,
}
lowercase__ : Dict = insecure_tar_files[insecure_tar_file]
lowercase__ : Optional[int] = tmp_path / "extracted"
TarExtractor.extract(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> List[Any]:
# We should have less false positives than zipfile.is_zipfile
# We do that by checking only the magic number
lowercase__ : Dict = tmpdir / "not_a_zip_file"
# From: https://github.com/python/cpython/pull/5053
lowercase__ : str = (
b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
)
with not_a_zip_file.open("wb" ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
assert zipfile.is_zipfile(str(SCREAMING_SNAKE_CASE_ ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(SCREAMING_SNAKE_CASE_ ) # but we're right
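# Added context, as a hedged sketch: a genuine ZIP begins with one of the "PK"
# magic numbers, which is why checking only the leading bytes (as ZipExtractor
# does) avoids zipfile.is_zipfile's false positive demonstrated above.
def looks_like_zip(path) -> bool:
    with open(path, "rb") as f:
        return f.read(4) in (b"PK\x03\x04", b"PK\x05\x06", b"PK\x07\x08")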
| 298
| 0
|
"""simple docstring"""
import torch
from diffusers import DiffusionPipeline
class lowercase__ ( DiffusionPipeline ):
"""simple docstring"""
    def __init__( self , unet , scheduler ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
    def __call__( self ):
        '''simple docstring'''
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
        timestep = 1
        model_output = self.unet(image , timestep ).sample
        scheduler_output = self.scheduler.step(model_output , timestep , image ).prev_sample
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output )
        return result
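# Added usage sketch (model id assumed; left commented to avoid a download):
# from diffusers import UNet2DModel, DDPMScheduler
# unet = UNet2DModel.from_pretrained("google/ddpm-cat-256")
# scheduler = DDPMScheduler.from_pretrained("google/ddpm-cat-256")
# pipeline = lowercase__(unet=unet, scheduler=scheduler)
# print(pipeline())  # a tensor of ones, by construction of __call__ above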
| 102
|
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name ):
    if "emb" in name:
        name = name.replace("emb" , "model.decoder.embed_tokens" )
    if "transformer" in name:
        name = name.replace("transformer" , "model.decoder" )
    if "cross_attention" in name:
        name = name.replace("cross_attention" , "encoder_attn" )
    if "linear1" in name:
        name = name.replace("linear1" , "fc1" )
    if "linear2" in name:
        name = name.replace("linear2" , "fc2" )
    if "norm1" in name:
        name = name.replace("norm1" , "self_attn_layer_norm" )
    if "norm_cross" in name:
        name = name.replace("norm_cross" , "encoder_attn_layer_norm" )
    if "norm2" in name:
        name = name.replace("norm2" , "final_layer_norm" )
    if "out_norm" in name:
        name = name.replace("out_norm" , "model.decoder.layer_norm" )
    if "linears" in name:
        name = name.replace("linears" , "lm_heads" )
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" )
    return name
def rename_state_dict(state_dict , hidden_size ):
    keys = list(state_dict.keys() )
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key )
        key = rename_keys(key )
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight" , "q_proj.weight" )] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight" , "k_proj.weight" )] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight" , "v_proj.weight" )] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj." ) :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint ):
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size , ffn_dim=hidden_size * 4 , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint , pytorch_dump_folder=None , repo_id=None , device="cpu" ):
    fairseq_model = MusicGen.get_pretrained(checkpoint , device=device )
    decoder_config = decoder_config_from_checkpoint(checkpoint )
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict , hidden_size=decoder_config.hidden_size )
    text_encoder = TaEncoderModel.from_pretrained("t5-base" )
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz" )
    decoder = MusicgenForCausalLM(decoder_config ).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict , strict=False )
    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key )
    if len(missing_keys ) > 0:
        raise ValueError(f"""Missing key(s) in state_dict: {missing_keys}""" )
    if len(unexpected_keys ) > 0:
        raise ValueError(f"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder , audio_encoder=audio_encoder , decoder=decoder )
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict )
    # check we can do a forward pass
    input_ids = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
    decoder_input_ids = input_ids.reshape(2 * 4 , -1 )
    with torch.no_grad():
        logits = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids ).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError("""Incorrect shape for logits""" )
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base" )
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" )
    processor = MusicgenProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate )
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder ).mkdir(exist_ok=True )
        logger.info(f"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
        model.save_pretrained(pytorch_dump_folder )
        processor.save_pretrained(pytorch_dump_folder )
    if repo_id:
        logger.info(f"""Pushing model {checkpoint} to {repo_id}""" )
        model.push_to_hub(repo_id )
        processor.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
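# Added example invocation (the script filename is assumed):
#   python convert_musicgen_transformers.py \
#       --checkpoint small --pytorch_dump_folder ./musicgen-small --device cpu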
| 102
| 1
|
"""simple docstring"""
def nand_gate(input_1: int , input_2: int ) -> int:
    """simple docstring"""
    return int((input_1, input_2).count(0 ) != 0 )
def test_nand_gate() -> None:
    """simple docstring"""
    assert nand_gate(0 , 0 ) == 1
    assert nand_gate(0 , 1 ) == 1
    assert nand_gate(1 , 0 ) == 1
    assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
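# Added cross-check: NAND is NOT(AND), so the tuple-count trick above must
# agree with the boolean formulation on every input pair.
for a in (0, 1):
    for b in (0, 1):
        assert nand_gate(a, b) == int(not (a and b))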
| 51
|
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000 , n_limit: int = 10 ) -> int:
    """simple docstring"""
    count: defaultdict = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit )
if __name__ == "__main__":
print(f"""{solution() = }""")
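# Added worked example: a width-5 outer square around a width-3 hole uses
# 5 * 5 - 3 * 3 = 16 tiles; the parity adjustment above keeps hole and outer
# widths matched so the border is a whole number of tiles thick.
print(solution(t_limit=1_000, n_limit=10))  # the same counting with a small tile budget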
| 51
| 1
|
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class a__:
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=3 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ) -> int:
snake_case__ =parent
snake_case__ =batch_size
snake_case__ =seq_length
snake_case__ =is_training
snake_case__ =use_input_mask
snake_case__ =use_token_type_ids
snake_case__ =use_labels
snake_case__ =vocab_size
snake_case__ =hidden_size
snake_case__ =num_hidden_layers
snake_case__ =num_attention_heads
snake_case__ =intermediate_size
snake_case__ =hidden_act
snake_case__ =hidden_dropout_prob
snake_case__ =attention_probs_dropout_prob
snake_case__ =max_position_embeddings
snake_case__ =type_vocab_size
snake_case__ =type_sequence_label_size
snake_case__ =initializer_range
snake_case__ =num_labels
snake_case__ =num_choices
snake_case__ =scope
def _lowercase ( self ) -> Tuple:
snake_case__ =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ =None
if self.use_input_mask:
snake_case__ =random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ =None
snake_case__ =None
snake_case__ =None
snake_case__ =None
if self.use_labels:
snake_case__ =ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case__ =ids_tensor([self.batch_size] , self.num_choices )
snake_case__ =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self ) -> str:
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=_UpperCAmelCase , )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
snake_case__ =FalconModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case__ =model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )
snake_case__ =model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> List[str]:
snake_case__ =True
snake_case__ =FalconModel(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case__ =model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )
snake_case__ =model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , )
snake_case__ =model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> Dict:
snake_case__ =FalconForCausalLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case__ =model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> Optional[Any]:
snake_case__ =True
snake_case__ =True
snake_case__ =FalconForCausalLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
# first forward pass
snake_case__ =model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase , )
snake_case__ =outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
snake_case__ =ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case__ =ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
snake_case__ =torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case__ =torch.cat([input_mask, next_mask] , dim=-1 )
snake_case__ =model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , )['hidden_states'][0]
snake_case__ =model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , )['hidden_states'][0]
# select random slice
snake_case__ =ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case__ =output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case__ =output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 ) )
def _lowercase ( self ) -> List[str]:
snake_case__ =self.prepare_config_and_inputs()
(
snake_case__
) =config_and_inputs
snake_case__ ={'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a__( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
a_ : Optional[int] = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
a_ : Tuple = (FalconForCausalLM,) if is_torch_available() else ()
a_ : Union[str, Any] = (
{
"feature-extraction": FalconModel,
"text-classification": FalconForSequenceClassification,
"text-generation": FalconForCausalLM,
"question-answering": FalconForQuestionAnswering,
"token-classification": FalconForTokenClassification,
"zero-shot": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ : Optional[int] = False
a_ : List[str] = False
def _lowercase ( self ) -> Optional[int]:
snake_case__ =FalconModelTester(self )
snake_case__ =ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def _lowercase ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def _lowercase ( self ) -> Optional[int]:
snake_case__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def _lowercase ( self ) -> List[Any]:
snake_case__ =self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
snake_case__ =alibi
self.model_tester.create_and_check_model(_UpperCAmelCase , *_UpperCAmelCase )
def _lowercase ( self ) -> Union[str, Any]:
snake_case__ =self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ =3
snake_case__ =input_dict['input_ids']
snake_case__ =input_ids.ne(1 ).to(_UpperCAmelCase )
snake_case__ =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case__ =FalconForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case__ =model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase ( self ) -> Optional[Any]:
snake_case__ =self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ =3
snake_case__ ='single_label_classification'
snake_case__ =input_dict['input_ids']
snake_case__ =input_ids.ne(1 ).to(_UpperCAmelCase )
snake_case__ =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case__ =FalconForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case__ =model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase ( self ) -> Tuple:
snake_case__ =self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ =input_dict['input_ids']
snake_case__ =FalconForCausalLM(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case__ =model(_UpperCAmelCase , use_cache=_UpperCAmelCase )
snake_case__ =input_ids.shape[0]
snake_case__ =model._convert_to_rw_cache(result.past_key_values )
snake_case__ =model._convert_cache_to_standard_format(_UpperCAmelCase , _UpperCAmelCase )
for layer in range(len(_UpperCAmelCase ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def _lowercase ( self ) -> Union[str, Any]:
snake_case__ =self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ =3
snake_case__ ='multi_label_classification'
snake_case__ =input_dict['input_ids']
snake_case__ =input_ids.ne(1 ).to(_UpperCAmelCase )
snake_case__ =ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
snake_case__ =FalconForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case__ =model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase ( self ) -> str:
# Falcon can have different numbers of KV-heads than the number of query heads, so we need
# to override this test to use the right head counts.
for model_class in self.all_generative_model_classes:
snake_case__ =self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(_UpperCAmelCase , 'use_cache' ):
return
snake_case__ =model_class(_UpperCAmelCase ).to(_UpperCAmelCase )
if "use_cache" not in inputs:
snake_case__ =True
snake_case__ =model(**_UpperCAmelCase )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
snake_case__ =(
getattr(_UpperCAmelCase , 'decoder_layers' , _UpperCAmelCase )
or getattr(_UpperCAmelCase , 'num_decoder_layers' , _UpperCAmelCase )
or config.num_hidden_layers
)
snake_case__ =getattr(_UpperCAmelCase , 'num_kv_heads' , config.num_attention_heads )
snake_case__ =getattr(_UpperCAmelCase , 'd_model' , config.hidden_size )
snake_case__ =embed_dim // num_attention_heads
snake_case__ =outputs['past_key_values']
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
snake_case__ =inputs['input_ids'].shape
for i in range(_UpperCAmelCase ):
if config.new_decoder_architecture:
snake_case__ =config.num_attention_heads
elif config.multi_query:
snake_case__ =1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class a__( unittest.TestCase ):
@slow
def _lowercase ( self ) -> int:
snake_case__ =AutoTokenizer.from_pretrained('Rocketknight1/falcon-rw-1b' )
snake_case__ =FalconForCausalLM.from_pretrained('Rocketknight1/falcon-rw-1b' )
model.eval()
model.to(_UpperCAmelCase )
snake_case__ =tokenizer('My favorite food is' , return_tensors='pt' ).to(_UpperCAmelCase )
snake_case__ =(
'My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'
)
snake_case__ =model.generate(**_UpperCAmelCase , do_sample=_UpperCAmelCase , max_new_tokens=19 )
snake_case__ =tokenizer.batch_decode(_UpperCAmelCase )[0]
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
@slow
def _lowercase ( self ) -> Any:
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
snake_case__ =AutoTokenizer.from_pretrained(_UpperCAmelCase )
snake_case__ =FalconForCausalLM.from_pretrained(_UpperCAmelCase )
model.eval()
model.to(_UpperCAmelCase )
snake_case__ =tokenizer('My favorite food is' , return_tensors='pt' ).to(_UpperCAmelCase )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**_UpperCAmelCase , do_sample=_UpperCAmelCase , max_new_tokens=4 )
model.generate(**_UpperCAmelCase , do_sample=_UpperCAmelCase , max_new_tokens=4 )
model.generate(**_UpperCAmelCase , num_beams=2 , max_new_tokens=4 )
@slow
def _lowercase ( self ) -> List[Any]:
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
snake_case__ =AutoTokenizer.from_pretrained(_UpperCAmelCase )
snake_case__ =FalconForCausalLM.from_pretrained(_UpperCAmelCase )
model.eval()
model.to(device=_UpperCAmelCase )
snake_case__ =tokenizer('My favorite food is' , return_tensors='pt' ).to(_UpperCAmelCase )
# Test results are the same with and without cache
snake_case__ =model.generate(**_UpperCAmelCase , do_sample=_UpperCAmelCase , max_new_tokens=20 , use_cache=_UpperCAmelCase )
snake_case__ =model.generate(**_UpperCAmelCase , do_sample=_UpperCAmelCase , max_new_tokens=20 , use_cache=_UpperCAmelCase )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
| 538
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput( BaseOutput ):
    '''simple docstring'''
    latents: torch.FloatTensor
class VQModel( ModelMixin , ConfigMixin ):
    '''simple docstring'''
    @register_to_config
    def __init__( self , in_channels: int = 3 , out_channels: int = 3 , down_block_types: Tuple[str] = ("DownEncoderBlock2D",) , up_block_types: Tuple[str] = ("UpDecoderBlock2D",) , block_out_channels: Tuple[int] = (64,) , layers_per_block: int = 1 , act_fn: str = "silu" , latent_channels: int = 3 , sample_size: int = 32 , num_vq_embeddings: int = 256 , norm_num_groups: int = 32 , vq_embed_dim: Optional[int] = None , scaling_factor: float = 0.18215 , norm_type: str = "group" , ) -> None:
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=False , )
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels , vq_embed_dim , 1 )
        self.quantize = VectorQuantizer(num_vq_embeddings , vq_embed_dim , beta=0.25 , remap=None , sane_index_shape=False )
        self.post_quant_conv = nn.Conv2d(vq_embed_dim , latent_channels , 1 )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , norm_type=norm_type , )
    @apply_forward_hook
    def encode( self , x: torch.FloatTensor , return_dict: bool = True ) -> VQEncoderOutput:
        h = self.encoder(x )
        h = self.quant_conv(h )
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h )
    @apply_forward_hook
    def decode( self , h: torch.FloatTensor , force_not_quantize: bool = False , return_dict: bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant , emb_loss , info = self.quantize(h )
        else:
            quant = h
        quant2 = self.post_quant_conv(quant )
        dec = self.decoder(quant2 , quant if self.config.norm_type == 'spatial' else None )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
    def forward( self , sample: torch.FloatTensor , return_dict: bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x ).latents
        dec = self.decode(h ).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
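# Added shape sketch (spatial sizes assumed; left commented to avoid building
# the model): encode maps (B, 3, H, W) through quant_conv to latents, decode
# quantizes and reconstructs back to (B, 3, H, W).
# import torch
# vq = VQModel()  # the defaults registered above
# sample = torch.randn(1, 3, 32, 32)
# reconstruction = vq(sample).sample  # same shape as `sample`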
| 125
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class a_ :
lowerCamelCase__ : Union[str, Any] = XGLMConfig
lowerCamelCase__ : str = {}
lowerCamelCase__ : Any = 'gelu'
def __init__( self , UpperCAmelCase , UpperCAmelCase=14 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=99 , UpperCAmelCase=32 , UpperCAmelCase=2 , UpperCAmelCase=4 , UpperCAmelCase=37 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=5_12 , UpperCAmelCase=0.02 , ):
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_mask
a_ = use_labels
a_ = vocab_size
a_ = d_model
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = ffn_dim
a_ = activation_function
a_ = activation_dropout
a_ = attention_dropout
a_ = max_position_embeddings
a_ = initializer_range
a_ = None
a_ = 0
a_ = 2
a_ = 1
def lowerCAmelCase__ ( self ):
return XGLMConfig.from_pretrained("""facebook/xglm-564M""" )
def lowerCAmelCase__ ( self ):
a_ = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
a_ = None
if self.use_input_mask:
a_ = random_attention_mask([self.batch_size, self.seq_length] )
a_ = self.get_config()
a_ = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def lowerCAmelCase__ ( self ):
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=UpperCAmelCase , )
def lowerCAmelCase__ ( self ):
a_ = self.prepare_config_and_inputs()
(
(
a_
) , (
a_
) , (
a_
) , (
a_
) ,
) = config_and_inputs
a_ = {
"""input_ids""": input_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_tf
class a_ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowerCamelCase__ : List[Any] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
lowerCamelCase__ : Dict = (TFXGLMForCausalLM,) if is_tf_available() else ()
lowerCamelCase__ : List[Any] = (
{'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {}
)
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Union[str, Any] = False
def lowerCAmelCase__ ( self ):
a_ = TFXGLMModelTester(self )
a_ = ConfigTester(self , config_class=UpperCAmelCase , n_embd=37 )
def lowerCAmelCase__ ( self ):
self.config_tester.run_common_tests()
@slow
def lowerCAmelCase__ ( self ):
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = TFXGLMModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" )
def lowerCAmelCase__ ( self ):
super().test_resize_token_embeddings()
@require_tf
class a_ ( unittest.TestCase ):
@slow
def lowerCAmelCase__ ( self , UpperCAmelCase=True ):
a_ = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
a_ = tf.convert_to_tensor([[2, 2_68, 98_65]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
a_ = [2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81]
# fmt: on
a_ = model.generate(UpperCAmelCase , do_sample=UpperCAmelCase , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , UpperCAmelCase )
@slow
def lowerCAmelCase__ ( self ):
a_ = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
a_ = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
tf.random.set_seed(0 )
a_ = tokenizer("""Today is a nice day and""" , return_tensors="""tf""" )
a_ = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(""":/CPU:0""" ):
a_ = model.generate(UpperCAmelCase , do_sample=UpperCAmelCase , seed=[7, 0] )
a_ = tokenizer.decode(output_ids[0] , skip_special_tokens=UpperCAmelCase )
a_ = (
"""Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"""
)
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
@slow
def lowerCAmelCase__ ( self ):
a_ = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
a_ = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
a_ = """left"""
# use different length sentences to test batching
a_ = [
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When""",
"""Hello, my dog is a little""",
]
a_ = tokenizer(UpperCAmelCase , return_tensors="""tf""" , padding=UpperCAmelCase )
a_ = inputs["""input_ids"""]
a_ = model.generate(input_ids=UpperCAmelCase , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 )
a_ = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
a_ = model.generate(input_ids=UpperCAmelCase , max_new_tokens=12 )
a_ = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
a_ = model.generate(input_ids=UpperCAmelCase , max_new_tokens=12 )
a_ = tokenizer.batch_decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase )
a_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=UpperCAmelCase )
a_ = tokenizer.decode(output_padded[0] , skip_special_tokens=UpperCAmelCase )
a_ = [
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """
"""a single""",
"""Hello, my dog is a little bit of a shy one, but he is very friendly""",
]
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , [non_padded_sentence, padded_sentence] )
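# Added illustration of why padding_side="left" matters for batched generation
# (token ids hypothetical): new tokens are appended on the right, after the
# last *real* token, so the pads must sit on the left.
short_prompt_ids = [99, 98]
padded_ids = [0] * 3 + short_prompt_ids
attention_mask = [0] * 3 + [1] * len(short_prompt_ids)
print(padded_ids, attention_mask)  # [0, 0, 0, 99, 98] [0, 0, 0, 1, 1]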
| 511
|
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
lowercase__ =get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
lowercase__ =12_80_22
lowercase__ =12_80_28
@require_sentencepiece
class a_ ( TokenizerTesterMixin , unittest.TestCase ):
lowerCamelCase__ : Union[str, Any] = MaMaaaTokenizer
lowerCamelCase__ : str = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : str = True
def lowerCAmelCase__ ( self ):
super().setUp()
a_ = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
a_ = dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) )
a_ = Path(self.tmpdirname )
save_json(UpperCAmelCase , save_dir / VOCAB_FILES_NAMES["""vocab_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(UpperCAmelCase , save_dir / VOCAB_FILES_NAMES["""spm_file"""] )
a_ = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self , **UpperCAmelCase ):
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def lowerCAmelCase__ ( self , UpperCAmelCase ):
return (
"This is a test",
"This is a test",
)
def lowerCAmelCase__ ( self ):
a_ = """</s>"""
a_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase ) , UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ = self.get_tokenizer()
a_ = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<s>""" )
self.assertEqual(len(UpperCAmelCase ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("""Skip this test while all models are still to be uploaded.""" )
def lowerCAmelCase__ ( self ):
pass
def lowerCAmelCase__ ( self ):
a_ = self.get_tokenizer()
a_ = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(UpperCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [2, 3, 4, 5, 6] , )
a_ = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(UpperCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
a_ = tokenizer.convert_tokens_to_string(UpperCAmelCase )
self.assertEqual(UpperCAmelCase , """This is a test""" )
@slow
def lowerCAmelCase__ ( self ):
# fmt: off
a_ = {"""input_ids""": [[12_80_22, 11_01_08, 3_97, 11, 3_82_72, 22_47, 12_48_11, 2_85, 1_81_05, 15_86, 2_07, 7, 3_95_34, 44_28, 3_97, 10_19, 1_81_05, 15_86, 2_07, 7, 4_13_37, 1_67_86, 2_41, 7, 2_02_14, 17, 12_56_90, 1_03_98, 7, 4_43_78, 5_80_69, 6_83_42, 77_98, 73_43, 11, 2_99, 3_33_10, 4, 1_58, 3_73_50, 9_40_77, 45_69, 2_99, 3_33_10, 90, 4, 5_28_40, 2_90, 4, 3_12_70, 1_12, 2_99, 6_82, 4, 5_28_40, 3_99_53, 1_40_79, 1_93, 5_25_19, 9_08_94, 1_78_94, 12_06_97, 11, 4_04_45, 5_51, 17, 10_19, 5_25_19, 9_08_94, 1_77_56, 9_63, 11, 4_04_45, 4_80, 17, 97_92, 11_20, 51_73, 13_93, 62_40, 1_67_86, 2_41, 12_09_96, 28, 12_45, 13_93, 11_82_40, 1_11_23, 10_19, 9_36_12, 26_91, 1_06_18, 9_80_58, 12_04_09, 19_28, 2_79, 4, 4_06_83, 3_67, 1_78, 2_07, 10_19, 1_03, 10_31_21, 5_06, 6_52_96, 5, 2], [12_80_22, 2_12_17, 3_67, 1_17, 12_54_50, 1_28, 7_19, 7, 73_08, 40, 9_36_12, 1_26_69, 11_16, 1_67_04, 71, 1_77_85, 36_99, 1_55_92, 35, 1_44, 95_84, 2_41, 1_19_43, 7_13, 9_50, 7_99, 22_47, 8_84_27, 1_50, 1_49, 11_88_13, 12_07_06, 10_19, 10_69_06, 8_15_18, 28, 12_24, 2_27_99, 3_97, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_80_22, 16_58, 12_33_11, 51_55, 55_78, 47_22, 2_79, 1_49_47, 23_66, 11_20, 11_97, 14, 13_48, 92_32, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase , model_name="""facebook/m2m100_418M""" , revision="""c168bae485c864188cf9aa0e4108b0b6934dc91e""" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
lowerCamelCase__ : int = 'facebook/m2m100_418M'
lowerCamelCase__ : Union[str, Any] = [
'In my opinion, there are two levels of response from the French government.',
'NSA Affair Emphasizes Complete Lack of Debate on Intelligence',
]
lowerCamelCase__ : int = [
'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
]
# fmt: off
lowerCamelCase__ : Optional[int] = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
@classmethod
def lowerCAmelCase__ ( cls ):
a_ = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en""" , tgt_lang="""fr""" )
a_ = 1
return cls
def lowerCAmelCase__ ( self ):
self.assertEqual(self.tokenizer.get_lang_id("""ar""" ) , 12_80_06 )
self.assertEqual(self.tokenizer.get_lang_id("""en""" ) , 12_80_22 )
self.assertEqual(self.tokenizer.get_lang_id("""ro""" ) , 12_80_76 )
self.assertEqual(self.tokenizer.get_lang_id("""mr""" ) , 12_80_63 )
def lowerCAmelCase__ ( self ):
a_ = self.tokenizer.get_vocab()
self.assertEqual(len(UpperCAmelCase ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["""<unk>"""] , 3 )
self.assertIn(self.tokenizer.get_lang_token("""en""" ) , UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ = """en"""
a_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCAmelCase )
def lowerCAmelCase__ ( self ):
self.assertIn(UpperCAmelCase , self.tokenizer.all_special_ids )
# fmt: off
a_ = [FR_CODE, 53_64, 82, 86_42, 4, 2_94, 47, 8, 1_40_28, 1_36, 32_86, 97_06, 6, 9_07_97, 6, 14_40_12, 1_62, 8_81_28, 3_00_61, 5, 2]
# fmt: on
a_ = self.tokenizer.decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase )
a_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ = tempfile.mkdtemp()
a_ = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(UpperCAmelCase )
a_ = MaMaaaTokenizer.from_pretrained(UpperCAmelCase )
self.assertDictEqual(new_tok.lang_token_to_id , UpperCAmelCase )
@require_torch
def lowerCAmelCase__ ( self ):
a_ = """en"""
a_ = """fr"""
a_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase , return_tensors="""pt""" )
a_ = shift_tokens_right(
batch["""labels"""] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
a_ = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def lowerCAmelCase__ ( self ):
a_ = """mr"""
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""mr""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
a_ = """zh"""
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""zh""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def lowerCAmelCase__ ( self ):
a_ = """mr"""
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""mr""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
a_ = """zh"""
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""zh""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def lowerCAmelCase__ ( self ):
a_ = self.tokenizer._build_translation_inputs("""A test""" , return_tensors="""pt""" , src_lang="""en""" , tgt_lang="""ar""" )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , {
# en_XX, A, test, EOS
"""input_ids""": [[12_80_22, 58, 41_83, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 12_80_06,
} , )
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class __A ( UpperCamelCase__ , unittest.TestCase ):
UpperCamelCase = RoFormerTokenizer
UpperCamelCase = RoFormerTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def A__ ( self :List[str] ):
'''simple docstring'''
super().setUp()
def A__ ( self :List[str] , **__snake_case :str ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **__snake_case )
def A__ ( self :str , **__snake_case :Dict ):
'''simple docstring'''
return self.rust_tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **__snake_case )
def A__ ( self :Optional[int] ):
'''simple docstring'''
__magic_name__ : List[str] ="""永和服装饰品有限公司,今天天气非常好"""
__magic_name__ : Any ="""永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"""
return input_text, output_text
def A__ ( self :Dict ):
'''simple docstring'''
__magic_name__ : Optional[Any] =self.get_tokenizer()
__magic_name__ , __magic_name__ : Optional[int] =self.get_chinese_input_output_texts()
__magic_name__ : Any =tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , output_text.split() )
__magic_name__ : Any =tokens + [tokenizer.unk_token]
__magic_name__ : Union[str, Any] =[2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def A__ ( self :Any ):
'''simple docstring'''
__magic_name__ : str =self.get_rust_tokenizer()
__magic_name__ , __magic_name__ : Any =self.get_chinese_input_output_texts()
__magic_name__ : Any =tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , output_text.split() )
__magic_name__ : Optional[int] =tokens + [tokenizer.unk_token]
__magic_name__ : Any =[2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def A__ ( self :Tuple ):
'''simple docstring'''
pass
def A__ ( self :Dict ):
'''simple docstring'''
pass
def A__ ( self :Tuple ):
'''simple docstring'''
pass
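# A minimal usage sketch for the tokenizer tested above, assuming the
# junnyu/roformer_chinese_base checkpoint and an installed rjieba backend:
# Chinese text is first segmented into words, then WordPiece is applied, so
# multi-character words survive as single tokens.
from transformers import RoFormerTokenizer

def tokenize_chinese(text="今天天气非常好"):
    tokenizer = RoFormerTokenizer.from_pretrained("junnyu/roformer_chinese_base")
    tokens = tokenizer.tokenize(text)
    return tokens, tokenizer.convert_tokens_to_ids(tokens)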
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : List[Any] = logging.get_logger(__name__)
lowercase__ : Optional[int] = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''lxmert'''
lowerCAmelCase = {}
def __init__( self , _UpperCAmelCase=3_0522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=9500 , _UpperCAmelCase=1600 , _UpperCAmelCase=400 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=9 , _UpperCAmelCase=5 , _UpperCAmelCase=5 , _UpperCAmelCase=2048 , _UpperCAmelCase=4 , _UpperCAmelCase=6.67 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , **_UpperCAmelCase , ):
'''simple docstring'''
__A : Tuple = vocab_size
__A : int = hidden_size
__A : str = num_attention_heads
__A : Tuple = hidden_act
__A : int = intermediate_size
__A : str = hidden_dropout_prob
__A : Optional[int] = attention_probs_dropout_prob
__A : Optional[Any] = max_position_embeddings
__A : Tuple = type_vocab_size
__A : Optional[int] = initializer_range
__A : Any = layer_norm_eps
__A : Optional[Any] = num_qa_labels
__A : Optional[int] = num_object_labels
__A : Any = num_attr_labels
__A : Union[str, Any] = l_layers
__A : Optional[int] = x_layers
__A : List[Any] = r_layers
__A : Tuple = visual_feat_dim
__A : Tuple = visual_pos_dim
__A : Optional[int] = visual_loss_normalizer
__A : int = task_matched
__A : List[Any] = task_mask_lm
__A : Optional[Any] = task_obj_predict
__A : str = task_qa
__A : List[Any] = visual_obj_loss
__A : Optional[Any] = visual_attr_loss
__A : Union[str, Any] = visual_feat_loss
__A : Union[str, Any] = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
super().__init__(**_UpperCAmelCase)
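# A brief sketch of how a config like the one above is consumed, assuming the
# stock transformers LXMERT classes; the override values are illustrative only.
from transformers import LxmertConfig, LxmertModel

def build_small_lxmert():
    config = LxmertConfig(hidden_size=768, l_layers=9, x_layers=5, r_layers=5)
    # The model is randomly initialized from the config (no pretrained weights).
    return LxmertModel(config)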
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def snake_case__ ( _snake_case : Union[str, Any] ):
"""simple docstring"""
if (
(cp >= 0x4e00 and cp <= 0x9fff)
or (cp >= 0x3400 and cp <= 0x4dbf) #
or (cp >= 0x20000 and cp <= 0x2a6df) #
or (cp >= 0x2a700 and cp <= 0x2b73f) #
or (cp >= 0x2b740 and cp <= 0x2b81f) #
or (cp >= 0x2b820 and cp <= 0x2ceaf) #
or (cp >= 0xf900 and cp <= 0xfaff)
or (cp >= 0x2f800 and cp <= 0x2fa1f) #
): #
return True
return False
def snake_case__ ( _snake_case : str ):
"""simple docstring"""
for char in word:
UpperCamelCase__ = ord(_snake_case )
if not _is_chinese_char(_snake_case ):
return 0
return 1
def snake_case__ ( _snake_case : List[str] ):
"""simple docstring"""
UpperCamelCase__ = set()
for token in tokens:
UpperCamelCase__ = len(_snake_case ) > 1 and is_chinese(_snake_case )
if chinese_word:
word_set.add(_snake_case )
UpperCamelCase__ = list(_snake_case )
return word_list
def snake_case__ ( _snake_case : List[str] , _snake_case : set() ):
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
UpperCamelCase__ = max([len(_snake_case ) for w in chinese_word_set] )
UpperCamelCase__ = bert_tokens
UpperCamelCase__ , UpperCamelCase__ = 0, len(_snake_case )
while start < end:
UpperCamelCase__ = True
if is_chinese(bert_word[start] ):
UpperCamelCase__ = min(end - start , _snake_case )
for i in range(_snake_case , 1 , -1 ):
UpperCamelCase__ = "".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
UpperCamelCase__ = "##" + bert_word[j]
UpperCamelCase__ = start + i
UpperCamelCase__ = False
break
if single_word:
start += 1
return bert_word
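# A tiny worked example of the routine above, assuming its intended signature
# add_sub_symbol(bert_tokens, chinese_word_set) and assuming the segmenter
# produced the word "天气": the second character of that word is re-marked with
# "##" so whole-word masking can treat both characters as one unit.
def _demo_add_sub_symbol():
    bert_tokens = ["今", "天", "天", "气", "好"]
    chinese_word_set = {"天气"}
    assert add_sub_symbol(bert_tokens, chinese_word_set) == ["今", "天", "天", "##气", "好"]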
def snake_case__ ( _snake_case : List[str] , _snake_case : LTP , _snake_case : BertTokenizer ):
"""simple docstring"""
UpperCamelCase__ = []
for i in range(0 , len(_snake_case ) , 1_00 ):
UpperCamelCase__ = ltp_tokenizer.pipeline(lines[i : i + 1_00] , tasks=["cws"] ).cws
UpperCamelCase__ = [get_chinese_word(_snake_case ) for r in res]
ltp_res.extend(_snake_case )
assert len(_snake_case ) == len(_snake_case )
UpperCamelCase__ = []
for i in range(0 , len(_snake_case ) , 1_00 ):
UpperCamelCase__ = bert_tokenizer(lines[i : i + 1_00] , add_special_tokens=_snake_case , truncation=_snake_case , max_length=5_12 )
bert_res.extend(res["input_ids"] )
assert len(_snake_case ) == len(_snake_case )
UpperCamelCase__ = []
for input_ids, chinese_word in zip(_snake_case , _snake_case ):
UpperCamelCase__ = []
for id in input_ids:
UpperCamelCase__ = bert_tokenizer._convert_id_to_token(_snake_case )
input_tokens.append(_snake_case )
UpperCamelCase__ = add_sub_symbol(_snake_case , _snake_case )
UpperCamelCase__ = []
# We only save the positions of Chinese subwords that start with "##", which means they are part of a whole word.
for i, token in enumerate(_snake_case ):
if token[:2] == "##":
UpperCamelCase__ = token[2:]
# save chinese tokens' pos
if len(_snake_case ) == 1 and _is_chinese_char(ord(_snake_case ) ):
ref_id.append(_snake_case )
ref_ids.append(_snake_case )
assert len(_snake_case ) == len(_snake_case )
return ref_ids
def snake_case__ ( _snake_case : Any ):
"""simple docstring"""
with open(args.file_name , "r" , encoding="utf-8" ) as f:
UpperCamelCase__ = f.readlines()
UpperCamelCase__ = [line.strip() for line in data if len(_snake_case ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
UpperCamelCase__ = LTP(args.ltp ) # faster in GPU device
UpperCamelCase__ = BertTokenizer.from_pretrained(args.bert )
UpperCamelCase__ = prepare_ref(_snake_case , _snake_case , _snake_case )
with open(args.save_path , "w" , encoding="utf-8" ) as f:
UpperCamelCase__ = [json.dumps(_snake_case ) + "\n" for ref in ref_ids]
f.writelines(_snake_case )
if __name__ == "__main__":
A : int = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
A : Optional[int] = parser.parse_args()
main(args)
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCAmelCase ( snake_case__ , unittest.TestCase ):
'''simple docstring'''
A = KandinskyVaaPipeline
A = [
'image_embeds',
'negative_image_embeds',
]
A = ['image_embeds', 'negative_image_embeds']
A = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
A = False
@property
def lowerCamelCase__ ( self :Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return 3_2
@property
def lowerCamelCase__ ( self :Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
return 3_2
@property
def lowerCamelCase__ ( self :Any ) -> int:
"""simple docstring"""
return self.time_input_dim
@property
def lowerCamelCase__ ( self :List[Any] ) -> Tuple:
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowerCamelCase__ ( self :Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return 1_0_0
@property
def lowerCamelCase__ ( self :Tuple ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase__ = {
"in_channels": 4,
# Out channels is double the in channels because the model predicts both the mean and the variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
UpperCamelCase__ = UNetaDConditionModel(**lowerCamelCase_ )
return model
@property
def lowerCamelCase__ ( self :str ) -> Optional[int]:
"""simple docstring"""
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCamelCase__ ( self :Optional[int] ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase__ = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCamelCase__ ( self :int ) -> str:
"""simple docstring"""
UpperCamelCase__ = self.dummy_unet
UpperCamelCase__ = self.dummy_movq
UpperCamelCase__ = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule="linear" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , steps_offset=1 , prediction_type="epsilon" , thresholding=lowerCamelCase_ , )
UpperCamelCase__ = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def lowerCamelCase__ ( self :int , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Optional[int]=0 ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
UpperCamelCase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowerCamelCase_ )
if str(lowerCamelCase_ ).startswith("mps" ):
UpperCamelCase__ = torch.manual_seed(lowerCamelCase_ )
else:
UpperCamelCase__ = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
UpperCamelCase__ = {
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 6_4,
"width": 6_4,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def lowerCamelCase__ ( self :Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ = "cpu"
UpperCamelCase__ = self.get_dummy_components()
UpperCamelCase__ = self.pipeline_class(**lowerCamelCase_ )
UpperCamelCase__ = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase__ = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
UpperCamelCase__ = output.images
UpperCamelCase__ = pipe(
**self.get_dummy_inputs(lowerCamelCase_ ) , return_dict=lowerCamelCase_ , )[0]
UpperCamelCase__ = image[0, -3:, -3:, -1]
UpperCamelCase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase__ = np.array(
[0.6_237_976, 1.0, 0.36_441_332, 1.0, 0.70_639_634, 0.29_877_186, 0.85_652_125, 0.5_216_843, 0.54_454_046] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase__ ( self :str ) -> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self :List[Any] ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy" )
UpperCamelCase__ = KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase_ )
UpperCamelCase__ = KandinskyVaaPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa )
UpperCamelCase__ = pipeline.to(lowerCamelCase_ )
pipeline.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase__ = "red cat, 4k photo"
UpperCamelCase__ = torch.Generator(device="cuda" ).manual_seed(0 )
UpperCamelCase__ , UpperCamelCase__ = pipe_prior(
lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
UpperCamelCase__ = torch.Generator(device="cuda" ).manual_seed(0 )
UpperCamelCase__ = pipeline(
image_embeds=lowerCamelCase_ , negative_image_embeds=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=1_0_0 , output_type="np" , )
UpperCamelCase__ = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ )
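# A compact inference sketch matching the slow test above, assuming the public
# kandinsky-community checkpoints and a CUDA device; the prompt and step counts
# are illustrative. The prior turns text into image embeddings, and the decoder
# turns those embeddings into pixels.
import torch
from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline

def kandinsky_text2img(prompt="red cat, 4k photo"):
    prior = KandinskyV22PriorPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
    ).to("cuda")
    decoder = KandinskyV22Pipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
    ).to("cuda")
    image_embeds, negative_embeds = prior(prompt, num_inference_steps=25).to_tuple()
    return decoder(
        image_embeds=image_embeds,
        negative_image_embeds=negative_embeds,
        height=512,
        width=512,
        num_inference_steps=50,
    ).images[0]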
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ():
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
__UpperCamelCase : Union[str, Any] = generate_large_matrix()
__UpperCamelCase : Tuple = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[list[int]] ):
assert all(row == sorted(_UpperCAmelCase , reverse=_UpperCAmelCase ) for row in grid )
assert all(list(_UpperCAmelCase ) == sorted(_UpperCAmelCase , reverse=_UpperCAmelCase ) for col in zip(*_UpperCAmelCase ) )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[int] ):
lowerCAmelCase = 0
lowerCAmelCase = len(_UpperCAmelCase ) - 1
# Edge cases: the row is empty, or its first value is already negative (so every value is negative).
if not array or array[0] < 0:
return 0
while right + 1 > left:
lowerCAmelCase = (left + right) // 2
lowerCAmelCase = array[mid]
# `num` is negative and the element just before it is non-negative, so `mid` is the first negative index.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
lowerCAmelCase = mid + 1
else:
lowerCAmelCase = mid - 1
# No negative numbers, so return the length of the array (one past the last index).
return len(_UpperCAmelCase )
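# A quick worked example, assuming the intended name find_negative_index (used
# by the caller below) and the same descending-row convention: in
# [4, 3, 2, -1, -2] the first negative value sits at index 3; an all-positive
# row returns its length; an empty row returns 0.
def _demo_find_negative_index():
    assert find_negative_index([4, 3, 2, -1, -2]) == 3
    assert find_negative_index([3, 2, 1]) == 3
    assert find_negative_index([]) == 0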
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[list[int]] ):
lowerCAmelCase = 0
lowerCAmelCase = len(grid[0] )
for i in range(len(_UpperCAmelCase ) ):
lowerCAmelCase = find_negative_index(grid[i][:bound] )
total += bound
return (len(_UpperCAmelCase ) * len(grid[0] )) - total
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[list[int]] ):
return len([number for row in grid for number in row if number < 0] )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[list[int]] ):
lowerCAmelCase = 0
for row in grid:
for i, number in enumerate(_UpperCAmelCase ):
if number < 0:
total += len(_UpperCAmelCase ) - i
break
return total
def _SCREAMING_SNAKE_CASE ():
from timeit import timeit
print('Running benchmarks' )
lowerCAmelCase = (
'from __main__ import count_negatives_binary_search, '
'count_negatives_brute_force, count_negatives_brute_force_with_break, grid'
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
lowerCAmelCase = timeit(F'{func}(grid=grid)' , setup=_UpperCAmelCase , number=500 )
print(F'{func}() took {time:0.4f} seconds' )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
"""simple docstring"""
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def _SCREAMING_SNAKE_CASE ():
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'-m' , '--pretrained_model_name_or_path' , type=_UpperCAmelCase , default=_UpperCAmelCase , required=_UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models.' , )
parser.add_argument(
'-c' , '--caption' , type=_UpperCAmelCase , default='robotic cat with wings' , help='Text used to generate images.' , )
parser.add_argument(
'-n' , '--images_num' , type=_UpperCAmelCase , default=4 , help='How much images to generate.' , )
parser.add_argument(
'-s' , '--seed' , type=_UpperCAmelCase , default=42 , help='Seed for random process.' , )
parser.add_argument(
'-ci' , '--cuda_id' , type=_UpperCAmelCase , default=0 , help='cuda_id.' , )
lowerCAmelCase = parser.parse_args()
return args
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[int] ):
if not len(_UpperCAmelCase ) == rows * cols:
raise ValueError('The number of images does not match the specified rows and columns.' )
lowerCAmelCase ,lowerCAmelCase = imgs[0].size
lowerCAmelCase = Image.new('RGB' , size=(cols * w, rows * h) )
lowerCAmelCase ,lowerCAmelCase = grid.size
for i, img in enumerate(_UpperCAmelCase ):
grid.paste(_UpperCAmelCase , box=(i % cols * w, i // cols * h) )
return grid
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any]="robotic cat with wings" , _UpperCAmelCase : Optional[int]=7.5 , _UpperCAmelCase : Dict=50 , _UpperCAmelCase : Tuple=1 , _UpperCAmelCase : int=42 , ):
lowerCAmelCase = torch.Generator(pipeline.device ).manual_seed(_UpperCAmelCase )
lowerCAmelCase = pipeline(
_UpperCAmelCase , guidance_scale=_UpperCAmelCase , num_inference_steps=_UpperCAmelCase , generator=_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase , ).images
lowerCAmelCase = int(math.sqrt(_UpperCAmelCase ) )
lowerCAmelCase = image_grid(_UpperCAmelCase , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
__UpperCamelCase : Optional[Any] = parse_args()
# Load models and create wrapper for stable diffusion
__UpperCamelCase : List[Any] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''')
__UpperCamelCase : str = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''')
__UpperCamelCase : Optional[int] = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''')
__UpperCamelCase : List[str] = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''')
__UpperCamelCase : Tuple = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
__UpperCamelCase : Union[str, Any] = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')):
__UpperCamelCase : Dict = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, '''unet''', unet)
else:
__UpperCamelCase : Dict = unet.to(torch.device('''cuda''', args.cuda_id))
__UpperCamelCase : Optional[Any] = pipeline.to(unet.device)
__UpperCamelCase ,__UpperCamelCase : List[Any] = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split()))))
__UpperCamelCase : int = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
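# A self-contained sketch of the grid-pasting idea used above, with solid-color
# placeholder images instead of generated ones; sizes and colors are arbitrary.
from PIL import Image

def demo_image_grid(rows=2, cols=2, size=64):
    colors = ["red", "green", "blue", "yellow"]
    imgs = [Image.new("RGB", (size, size), c) for c in colors[: rows * cols]]
    grid = Image.new("RGB", (cols * size, rows * size))
    for i, img in enumerate(imgs):
        # Place image i at column i % cols and row i // cols.
        grid.paste(img, box=(i % cols * size, i // cols * size))
    return grid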
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : List[Any] =logging.get_logger(__name__)
a__ : int ={
'''huggingface/time-series-transformer-tourism-monthly''': (
'''https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'''
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] ="time_series_transformer"
SCREAMING_SNAKE_CASE_ : Optional[int] ={
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self : Dict , __A : Optional[int] = None , __A : Optional[int] = None , __A : str = "student_t" , __A : str = "nll" , __A : int = 1 , __A : List[int] = [1, 2, 3, 4, 5, 6, 7] , __A : Optional[Union[str, bool]] = "mean" , __A : int = 0 , __A : int = 0 , __A : int = 0 , __A : int = 0 , __A : Optional[List[int]] = None , __A : Optional[List[int]] = None , __A : int = 3_2 , __A : int = 3_2 , __A : int = 2 , __A : int = 2 , __A : int = 2 , __A : int = 2 , __A : bool = True , __A : str = "gelu" , __A : int = 6_4 , __A : float = 0.1 , __A : float = 0.1 , __A : float = 0.1 , __A : float = 0.1 , __A : float = 0.1 , __A : int = 1_0_0 , __A : float = 0.02 , __A : int=True , **__A : Tuple , ):
# time series specific configuration
__UpperCamelCase = prediction_length
__UpperCamelCase = context_length or prediction_length
__UpperCamelCase = distribution_output
__UpperCamelCase = loss
__UpperCamelCase = input_size
__UpperCamelCase = num_time_features
__UpperCamelCase = lags_sequence
__UpperCamelCase = scaling
__UpperCamelCase = num_dynamic_real_features
__UpperCamelCase = num_static_real_features
__UpperCamelCase = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(__A ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
__UpperCamelCase = cardinality
else:
__UpperCamelCase = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(__A ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
__UpperCamelCase = embedding_dimension
else:
__UpperCamelCase = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
__UpperCamelCase = num_parallel_samples
# Transformer architecture configuration
__UpperCamelCase = input_size * len(__A ) + self._number_of_features
__UpperCamelCase = d_model
__UpperCamelCase = encoder_attention_heads
__UpperCamelCase = decoder_attention_heads
__UpperCamelCase = encoder_ffn_dim
__UpperCamelCase = decoder_ffn_dim
__UpperCamelCase = encoder_layers
__UpperCamelCase = decoder_layers
__UpperCamelCase = dropout
__UpperCamelCase = attention_dropout
__UpperCamelCase = activation_dropout
__UpperCamelCase = encoder_layerdrop
__UpperCamelCase = decoder_layerdrop
__UpperCamelCase = activation_function
__UpperCamelCase = init_std
__UpperCamelCase = use_cache
super().__init__(is_encoder_decoder=__A , **__A )
@property
def _lowerCamelCase ( self : List[str] ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
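# A brief construction sketch, assuming the stock transformers classes; the
# field values are illustrative. Note how `context_length` falls back to
# `prediction_length` when left unset, mirroring the constructor above.
from transformers import TimeSeriesTransformerConfig, TimeSeriesTransformerModel

def build_ts_transformer():
    config = TimeSeriesTransformerConfig(
        prediction_length=24,
        context_length=48,
        lags_sequence=[1, 2, 3, 4, 5, 6, 7],
        num_time_features=2,
    )
    return TimeSeriesTransformerModel(config)  # randomly initialized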
'''simple docstring'''
import inspect
import unittest
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def _lowerCamelCase ( self : Optional[Any] ):
try:
import diffusers # noqa: F401
except ImportError:
assert False
def _lowerCamelCase ( self : Tuple ):
import diffusers
from diffusers.dependency_versions_table import deps
__UpperCamelCase = inspect.getmembers(__A , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
__UpperCamelCase = 'k-diffusion'
elif backend == "invisible_watermark":
__UpperCamelCase = 'invisible-watermark'
assert backend in deps, f'''{backend} is not in the deps table!'''
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
UpperCamelCase__: str = logging.get_logger(__name__)
def snake_case_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any=False ) -> Tuple:
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
if not is_sharded:
UpperCAmelCase : int = os.path.abspath(SCREAMING_SNAKE_CASE__ )
logger.info(f"""Loading PyTorch weights from {pt_path}""" )
UpperCAmelCase : Optional[Any] = torch.load(SCREAMING_SNAKE_CASE__ , map_location='''cpu''' )
logger.info(f"""PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.""" )
UpperCAmelCase : List[Any] = convert_pytorch_state_dict_to_flax(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
UpperCAmelCase : Optional[Any] = convert_pytorch_sharded_state_dict_to_flax(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return flax_state_dict
def snake_case_ ( _lowerCAmelCase : Tuple[str] , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : Dict[str, jnp.ndarray] , _lowerCAmelCase : str , ) -> (Tuple[str], np.ndarray):
def is_key_or_prefix_key_in_dict(_lowerCAmelCase : Tuple[str] ) -> bool:
return len(set(SCREAMING_SNAKE_CASE__ ) & {key, (model_prefix,) + key} ) > 0
# layer norm
UpperCAmelCase : Dict = pt_tuple_key[:-1] + ("scale",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE__ ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
UpperCAmelCase : Any = pt_tuple_key[:-1] + ("mean",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE__ ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
UpperCAmelCase : Union[str, Any] = pt_tuple_key[:-1] + ("var",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE__ ):
return renamed_pt_tuple_key, pt_tensor
# embedding
UpperCAmelCase : Any = pt_tuple_key[:-1] + ("embedding",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE__ ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCAmelCase : int = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase : str = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCAmelCase : str = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase : int = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCAmelCase : int = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCAmelCase : Optional[int] = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
UpperCAmelCase : Union[str, Any] = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
UpperCAmelCase : Any = pt_tuple_key[-2] + "_g"
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
UpperCAmelCase : str = pt_tuple_key[-2] + "_v"
if name is not None:
UpperCAmelCase : Dict = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def snake_case_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple ) -> List[Any]:
UpperCAmelCase : Dict = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCAmelCase : Optional[Any] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
UpperCAmelCase : Dict = flax_model.params["params"]
else:
UpperCAmelCase : Optional[int] = flax_model.params
UpperCAmelCase : Optional[int] = flatten_dict(SCREAMING_SNAKE_CASE__ )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCAmelCase : Any = flatten_dict(flax_model.params['''batch_stats'''] )
random_flax_state_dict.update(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase : Tuple = {}
UpperCAmelCase : Tuple = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
UpperCAmelCase : Tuple = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCAmelCase : Tuple = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
UpperCAmelCase : Tuple = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCAmelCase : int = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCAmelCase : Any = rename_key_and_reshape_tensor(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# add model prefix if necessary
UpperCAmelCase : Dict = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCAmelCase : int = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
UpperCAmelCase : Any = jnp.asarray(SCREAMING_SNAKE_CASE__ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
continue
# also add unexpected weight so that warning is thrown
UpperCAmelCase : List[str] = jnp.asarray(SCREAMING_SNAKE_CASE__ )
else:
# also add unexpected weight so that warning is thrown
UpperCAmelCase : List[Any] = jnp.asarray(SCREAMING_SNAKE_CASE__ )
return unflatten_dict(SCREAMING_SNAKE_CASE__ )
def snake_case_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ) -> int:
import torch
# Load the index
UpperCAmelCase : Union[str, Any] = {}
for shard_file in shard_filenames:
# load using msgpack utils
UpperCAmelCase : Optional[Any] = torch.load(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase : Dict = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCAmelCase : List[Any] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCAmelCase : List[str] = flax_model.params["params"]
UpperCAmelCase : List[Any] = flatten_dict(SCREAMING_SNAKE_CASE__ )
random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) )
else:
UpperCAmelCase : Any = flax_model.params
UpperCAmelCase : List[str] = flatten_dict(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase : Tuple = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
UpperCAmelCase : List[str] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCAmelCase : str = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
UpperCAmelCase : int = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCAmelCase : int = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCAmelCase : Dict = rename_key_and_reshape_tensor(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# add model prefix if necessary
UpperCAmelCase : Optional[int] = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCAmelCase : List[Any] = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
UpperCAmelCase : str = jnp.asarray(SCREAMING_SNAKE_CASE__ )
continue
if "var" in flax_key[-1]:
UpperCAmelCase : Tuple = jnp.asarray(SCREAMING_SNAKE_CASE__ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
continue
# also add unexpected weight so that warning is thrown
UpperCAmelCase : Tuple = jnp.asarray(SCREAMING_SNAKE_CASE__ )
else:
# also add unexpected weight so that warning is thrown
UpperCAmelCase : str = jnp.asarray(SCREAMING_SNAKE_CASE__ )
return unflatten_dict(SCREAMING_SNAKE_CASE__ )
def snake_case_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple ) -> Union[str, Any]:
UpperCAmelCase : Tuple = os.path.abspath(SCREAMING_SNAKE_CASE__ )
logger.info(f"""Loading Flax weights from {flax_checkpoint_path}""" )
# import correct flax class
UpperCAmelCase : List[str] = getattr(SCREAMING_SNAKE_CASE__ , '''Flax''' + model.__class__.__name__ )
# load flax weight dict
with open(SCREAMING_SNAKE_CASE__ , '''rb''' ) as state_f:
try:
UpperCAmelCase : Optional[Any] = from_bytes(SCREAMING_SNAKE_CASE__ , state_f.read() )
except UnpicklingError:
raise EnvironmentError(f"""Unable to convert {flax_checkpoint_path} to Flax deserializable object. """ )
return load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def snake_case_ ( _lowerCAmelCase : str , _lowerCAmelCase : Dict ) -> List[str]:
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
UpperCAmelCase : Any = flatten_dict(jax.tree_util.tree_map(lambda _lowerCAmelCase : x.dtype == jnp.bfloataa , SCREAMING_SNAKE_CASE__ ) ).values()
if any(SCREAMING_SNAKE_CASE__ ):
# convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
# and bf16 is not fully supported in PyTorch yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading them into the PyTorch model.''' )
UpperCAmelCase : Dict = jax.tree_util.tree_map(
lambda _lowerCAmelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , SCREAMING_SNAKE_CASE__ )
UpperCAmelCase : List[Any] = flatten_dict(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase : Union[str, Any] = pt_model.state_dict()
UpperCAmelCase : Union[str, Any] = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
UpperCAmelCase : Any = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
UpperCAmelCase : Optional[int] = []
UpperCAmelCase : Union[str, Any] = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
UpperCAmelCase : Optional[Any] = flax_key_tuple[0] == pt_model.base_model_prefix
UpperCAmelCase : Dict = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCAmelCase : Union[str, Any] = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
UpperCAmelCase : Union[str, Any] = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(SCREAMING_SNAKE_CASE__ ) not in pt_model_dict:
# conv layer
UpperCAmelCase : int = flax_key_tuple[:-1] + ("weight",)
UpperCAmelCase : Union[str, Any] = jnp.transpose(SCREAMING_SNAKE_CASE__ , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(SCREAMING_SNAKE_CASE__ ) not in pt_model_dict:
# linear layer
UpperCAmelCase : int = flax_key_tuple[:-1] + ("weight",)
UpperCAmelCase : Dict = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
UpperCAmelCase : str = flax_key_tuple[:-1] + ("weight",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
UpperCAmelCase : Optional[Any] = flax_key_tuple[:-1] + ("running_mean",)
elif "var" in flax_key_tuple[-1]:
UpperCAmelCase : str = flax_key_tuple[:-1] + ("running_var",)
if "batch_stats" in flax_state:
UpperCAmelCase : List[str] = ".".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
UpperCAmelCase : Any = ".".join(SCREAMING_SNAKE_CASE__ )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
UpperCAmelCase : List[str] = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
UpperCAmelCase : Dict = key.split('''.''' )
UpperCAmelCase : Union[str, Any] = None
if key_components[-3::2] == ["parametrizations", "original0"]:
UpperCAmelCase : Union[str, Any] = key_components[-2] + "_g"
elif key_components[-3::2] == ["parametrizations", "original1"]:
UpperCAmelCase : str = key_components[-2] + "_v"
if name is not None:
UpperCAmelCase : List[Any] = key_components[:-3] + [name]
UpperCAmelCase : str = ".".join(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase : Any = key
if flax_key in special_pt_names:
UpperCAmelCase : str = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
UpperCAmelCase : List[Any] = np.asarray(SCREAMING_SNAKE_CASE__ ) if not isinstance(SCREAMING_SNAKE_CASE__ , np.ndarray ) else flax_tensor
UpperCAmelCase : Any = torch.from_numpy(SCREAMING_SNAKE_CASE__ )
# remove from missing keys
missing_keys.remove(SCREAMING_SNAKE_CASE__ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(SCREAMING_SNAKE_CASE__ )
pt_model.load_state_dict(SCREAMING_SNAKE_CASE__ )
# re-transform missing_keys to list
UpperCAmelCase : Dict = list(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
else:
logger.warning(f"""All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n""" )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
logger.warning(
f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
''' use it for predictions and inference.''' )
else:
logger.warning(
f"""All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"""
'''If your task is similar to the task the model of the checkpoint was trained on, '''
f"""you can already use {pt_model.__class__.__name__} for predictions without further training.""" )
return pt_model
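# A round-trip usage sketch for the converters above, assuming both PyTorch and
# Flax are installed; the checkpoint name and save path are illustrative.
from transformers import BertModel, FlaxBertModel

def flax_pt_round_trip(name="bert-base-uncased", tmp_dir="/tmp/bert-flax"):
    # PyTorch -> Flax: `from_pt=True` routes through the conversion code above.
    flax_model = FlaxBertModel.from_pretrained(name, from_pt=True)
    flax_model.save_pretrained(tmp_dir)
    # Flax -> PyTorch: `from_flax=True` loads the msgpack weights back into torch.
    return BertModel.from_pretrained(tmp_dir, from_flax=True)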
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class lowerCamelCase (__lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase_ = "Wav2Vec2FeatureExtractor"
UpperCAmelCase_ = "AutoTokenizer"
def __init__( self : Tuple, _UpperCAmelCase : Dict, _UpperCAmelCase : Tuple ) -> List[str]:
"""simple docstring"""
super().__init__(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = self.feature_extractor
SCREAMING_SNAKE_CASE__ : Union[str, Any] = False
@classmethod
def A_ ( cls : int, _UpperCAmelCase : Dict, **_UpperCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
try:
return super().from_pretrained(_UpperCAmelCase, **_UpperCAmelCase )
except OSError:
warnings.warn(
F'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
" include a `tokenizer_class` attribute is deprecated and will be "
"removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
" attribute to either your `config.json` or `tokenizer_config.json` "
"file to suppress this warning: ", _UpperCAmelCase, )
SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(_UpperCAmelCase, **_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : int = WavaVecaCTCTokenizer.from_pretrained(_UpperCAmelCase, **_UpperCAmelCase )
return cls(feature_extractor=_UpperCAmelCase, tokenizer=_UpperCAmelCase )
def __call__( self : Optional[Any], *_UpperCAmelCase : int, **_UpperCAmelCase : str ) -> Optional[Any]:
"""simple docstring"""
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_UpperCAmelCase, **_UpperCAmelCase )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
SCREAMING_SNAKE_CASE__ : Tuple = kwargs.pop("raw_speech" )
else:
SCREAMING_SNAKE_CASE__ : Dict = kwargs.pop("audio", _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = kwargs.pop("sampling_rate", _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = kwargs.pop("text", _UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
SCREAMING_SNAKE_CASE__ : Optional[Any] = args[0]
SCREAMING_SNAKE_CASE__ : Tuple = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
SCREAMING_SNAKE_CASE__ : Dict = self.feature_extractor(_UpperCAmelCase, *_UpperCAmelCase, sampling_rate=_UpperCAmelCase, **_UpperCAmelCase )
if text is not None:
SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer(_UpperCAmelCase, **_UpperCAmelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
SCREAMING_SNAKE_CASE__ : List[str] = encodings["input_ids"]
return inputs
def A_ ( self : Optional[Any], *_UpperCAmelCase : List[str], **_UpperCAmelCase : Optional[int] ) -> Optional[int]:
"""simple docstring"""
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*_UpperCAmelCase, **_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = kwargs.pop("input_features", _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = kwargs.pop("labels", _UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
SCREAMING_SNAKE_CASE__ : Optional[Any] = args[0]
SCREAMING_SNAKE_CASE__ : Dict = args[1:]
if input_features is not None:
SCREAMING_SNAKE_CASE__ : Dict = self.feature_extractor.pad(_UpperCAmelCase, *_UpperCAmelCase, **_UpperCAmelCase )
if labels is not None:
SCREAMING_SNAKE_CASE__ : str = self.tokenizer.pad(_UpperCAmelCase, **_UpperCAmelCase )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
SCREAMING_SNAKE_CASE__ : List[str] = labels["input_ids"]
return input_features
def A_ ( self : Union[str, Any], *_UpperCAmelCase : str, **_UpperCAmelCase : Any ) -> Optional[int]:
"""simple docstring"""
return self.tokenizer.batch_decode(*_UpperCAmelCase, **_UpperCAmelCase )
def A_ ( self : Optional[int], *_UpperCAmelCase : Tuple, **_UpperCAmelCase : Optional[int] ) -> int:
"""simple docstring"""
return self.tokenizer.decode(*_UpperCAmelCase, **_UpperCAmelCase )
@contextmanager
def A_ ( self : Optional[int] ) -> Any:
"""simple docstring"""
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
SCREAMING_SNAKE_CASE__ : Dict = True
SCREAMING_SNAKE_CASE__ : int = self.tokenizer
yield
SCREAMING_SNAKE_CASE__ : Optional[int] = self.feature_extractor
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
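# A minimal usage sketch for the processor above, assuming the public
# facebook/wav2vec2-base-960h checkpoint and a one-second dummy waveform at
# 16 kHz; real audio would be loaded from a file instead.
import numpy as np
from transformers import Wav2Vec2Processor

def prepare_ctc_batch():
    processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
    speech = np.zeros(16_000, dtype=np.float32)  # silence placeholder
    batch = processor(
        audio=speech, sampling_rate=16_000, text="HELLO WORLD", return_tensors="pt"
    )
    # `batch` holds the feature-extractor output plus the tokenized text under
    # "labels", exactly as wired up in __call__ above.
    return batch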
'''simple docstring'''
from __future__ import annotations
def resistor_parallel( resistors: list[float] ) -> float:
    """Combined resistance of resistors in parallel: 1 / sum(1 / R_i)."""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f'''Resistor at index {index} has a negative or zero value!'''
            raise ValueError(msg )
        first_sum += 1 / float(resistor )
        index += 1
    return 1 / first_sum
def resistor_series( resistors: list[float] ) -> float:
    """Combined resistance of resistors in series: sum(R_i)."""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f'''Resistor at index {index} has a negative value!'''
            raise ValueError(msg )
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
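# A quick sanity check of the two helpers above (a sketch; the values are
# arbitrary): two 10-ohm resistors give 5 ohms in parallel and 20 ohms in series.
#
#   assert resistor_parallel([10, 10]) == 5.0
#   assert resistor_series([10, 10]) == 20.0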
| 347
|
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer( BaseTransformer ):
    mode = """sequence-classification"""
    def __init__( self : int , hparams : str ) -> str:
        if type(hparams ) == dict:
            hparams = Namespace(**hparams )
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams , num_labels , self.mode )
    def forward( self : Tuple , **inputs ) -> Union[str, Any]:
        return self.model(**inputs )
    def training_step( self : int , batch : Union[str, Any] , batch_idx : Optional[int] ) -> Optional[int]:
        inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs['token_type_ids'] = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
        outputs = self(**inputs )
        loss = outputs[0]
        lr_scheduler = self.trainer.lr_schedulers[0]['scheduler']
        tensorboard_logs = {'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
    def prepare_data( self : str ) -> Tuple:
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()
        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode )
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                logger.info('Loading features from cached file %s' , cached_features_file )
            else:
                logger.info('Creating features from dataset file at %s' , args.data_dir )
                examples = (
                    processor.get_dev_examples(args.data_dir )
                    if mode == 'dev'
                    else processor.get_train_examples(args.data_dir )
                )
                features = convert_examples_to_features(
                    examples , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
                logger.info('Saving features into cached file %s' , cached_features_file )
                torch.save(features , cached_features_file )
    def get_dataloader( self : List[Any] , mode : str , batch_size : int , shuffle : bool = False ) -> DataLoader:
        mode = 'dev' if mode == 'test' else mode
        cached_features_file = self._feature_file(mode )
        logger.info('Loading features from cached file %s' , cached_features_file )
        features = torch.load(cached_features_file )
        all_input_ids = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
        all_attention_mask = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features] , dtype=torch.long )
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features] , dtype=torch.float )
        return DataLoader(
            TensorDataset(all_input_ids , all_attention_mask , all_token_type_ids , all_labels ) , batch_size=batch_size , shuffle=shuffle , )
    def validation_step( self : List[str] , batch : Optional[int] , batch_idx : Any ) -> Dict:
        inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs['token_type_ids'] = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
        outputs = self(**inputs )
        tmp_eval_loss , logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs['labels'].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end( self : int , outputs : Union[str, Any] ) -> tuple:
        val_loss_mean = torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item()
        preds = np.concatenate([x['pred'] for x in outputs] , axis=0 )
        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds , axis=1 )
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds )
        out_label_ids = np.concatenate([x['target'] for x in outputs] , axis=0 )
        out_label_list = [[] for _ in range(out_label_ids.shape[0] )]
        preds_list = [[] for _ in range(out_label_ids.shape[0] )]
        results = {**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task , preds , out_label_ids )}
        ret = dict(results.items() )
        ret['log'] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end( self : str , outputs : list ) -> dict:
        ret , preds , targets = self._eval_end(outputs )
        logs = ret['log']
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    def test_epoch_end( self : Tuple , outputs : Tuple ) -> dict:
        ret , predictions , targets = self._eval_end(outputs )
        logs = ret['log']
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args( parser : Optional[int] , root_dir : Dict ) -> Tuple:
        BaseTransformer.add_model_specific_args(parser , root_dir )
        parser.add_argument(
            '--max_seq_length' , default=1_2_8 , type=int , help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
        parser.add_argument(
            '--task' , default='' , type=str , required=True , help='The GLUE task to run' , )
        parser.add_argument(
            '--gpus' , default=0 , type=int , help='The number of GPUs allocated for this, it is by default 0 meaning none' , )
        parser.add_argument(
            '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
        return parser
def main() -> Any:
    """simple docstring"""
    parser = argparse.ArgumentParser()
    add_generic_args(parser , os.getcwd() )
    parser = GLUETransformer.add_model_specific_args(parser , os.getcwd() )
    args = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            './results' , f'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' , )
        os.makedirs(args.output_dir )
    model = GLUETransformer(args )
    trainer = generic_train(model , args )
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir , 'checkpoint-epoch=*.ckpt' ) , recursive=True ) )
        model = model.load_from_checkpoint(checkpoints[-1] )
        return trainer.test(model )
if __name__ == "__main__":
main()
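# Example invocation (a hedged sketch: the script filename, data path and the
# --model_name_or_path / --do_predict flags are assumed to come from the
# lightning_base generic args, not confirmed by this file):
#
#   python run_pl_glue.py --task mrpc --data_dir ./glue_data/MRPC \
#       --model_name_or_path bert-base-cased --max_seq_length 128 --gpus 1 --do_predict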
| 347
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config( model_name ) -> Any:
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50" )
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101" )
    else:
        raise ValueError("Model name should include either resnet50 or resnet101" )
    config = DetrConfig(use_timm_backbone=False , backbone_config=backbone_config )
    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    return config, is_panoptic
def create_rename_keys( config ) -> Dict:
# here we list all keys to be renamed (original name on the left, our name on the right)
a__ : List[Any] = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
f'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
f'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
] )
return rename_keys
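# Illustrative check of the mapping built above (a sketch; downloads configs
# from the hub when run, so left as comments):
#
#   config, _ = get_detr_config("detr-resnet-50")
#   renames = create_rename_keys(config)
#   # each entry pairs an original DETR key with its HF name, e.g. the first one:
#   # ('backbone.0.body.conv1.weight',
#   #  'backbone.conv_encoder.model.embedder.embedder.convolution.weight')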
def rename_key( state_dict , old , new ) -> int:
    val = state_dict.pop(old )
    state_dict[new] = val
def read_in_q_k_v( state_dict , is_panoptic=False ) -> int:
    prefix = ""
    if is_panoptic:
        prefix = "detr."
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''encoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:256, :]
        state_dict[f'''encoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:256]
        state_dict[f'''encoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[256:512, :]
        state_dict[f'''encoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[256:512]
        state_dict[f'''encoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-256:, :]
        state_dict[f'''encoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6 ):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''decoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:256, :]
        state_dict[f'''decoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:256]
        state_dict[f'''decoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[256:512, :]
        state_dict[f'''decoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[256:512]
        state_dict[f'''decoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-256:, :]
        state_dict[f'''decoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
        in_proj_bias_cross_attn = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f'''decoder.layers.{i}.encoder_attn.q_proj.weight'''] = in_proj_weight_cross_attn[:256, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.q_proj.bias'''] = in_proj_bias_cross_attn[:256]
        state_dict[f'''decoder.layers.{i}.encoder_attn.k_proj.weight'''] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.k_proj.bias'''] = in_proj_bias_cross_attn[256:512]
        state_dict[f'''decoder.layers.{i}.encoder_attn.v_proj.weight'''] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.v_proj.bias'''] = in_proj_bias_cross_attn[-256:]
def prepare_img() -> Image.Image:
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_detr_checkpoint( model_name , pytorch_dump_folder_path=None , push_to_hub=False ) -> Optional[int]:
    config , is_panoptic = get_detr_config(model_name )
    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f'''Converting model {model_name}...''' )
    detr = torch.hub.load("facebookresearch/detr" , model_name_to_original_name[model_name] , pretrained=True ).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config ):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict , src , dest )
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict , is_panoptic=is_panoptic )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr" )
                and not key.startswith("class_labels_classifier" )
                and not key.startswith("bbox_predictor" )
            ):
                val = state_dict.pop(key )
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key )
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
                continue
            else:
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config ) if is_panoptic else DetrForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format )
    encoding = processor(images=prepare_img() , return_tensors="pt" )
    pixel_values = encoding["pixel_values"]
    original_outputs = detr(pixel_values )
    outputs = model(pixel_values )
    assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1e-3 )
    assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1e-3 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1e-4 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub..." )
        model.push_to_hub(f'''nielsr/{model_name}''' )
        processor.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""detr-resnet-50""",
type=str,
choices=["""detr-resnet-50""", """detr-resnet-101"""],
help="""Name of the DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the model to the hub or not.""")
args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
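# Example invocation (a sketch; assumes this file is saved as
# convert_detr_to_pytorch.py, and the dump folder is a placeholder):
#
#   python convert_detr_to_pytorch.py --model_name detr-resnet-50 \
#       --pytorch_dump_folder_path ./detr-resnet-50-converted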
| 37
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
UpperCamelCase : Dict = logging.get_logger(__name__)
def rename_key( key ) -> Union[str, Any]:
    regex = R"\w+[.]\d+"
    pats = re.findall(regex , key )
    for pat in pats:
        key = key.replace(pat , "_".join(pat.split("." ) ) )
    return key
def rename_key_and_reshape_tensor( pt_tuple_key , pt_tensor , random_flax_state_dict ) -> List[str]:
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key )
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax( pt_state_dict , flax_model , init_key=42 ) -> str:
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key ) )
    random_flax_state_dict = flatten_dict(random_flax_params )
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key )
        pt_tuple_key = tuple(renamed_pt_key.split("." ) )
        # Correctly rename weight parameters
        flax_key , flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key , pt_tensor , random_flax_state_dict )
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                    f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
    return unflatten_dict(flax_state_dict )
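# Hedged usage sketch (names are assumptions; any Flax module in the surrounding
# codebase that exposes `init_weights(rng)` fits this signature, and `pt_model`
# stands in for a torch.nn.Module):
#
#   flax_params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), flax_model)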
| 37
| 1
|
def selection_sort( collection : list ):
    '''simple docstring'''
    length = len(collection )
    for i in range(length - 1 ):
        least = i
        for k in range(i + 1 , length ):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least] , collection[i] = (collection[i], collection[least])
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
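# Quick illustration of the sort above (a sketch): entering "5,2,9,1" at the
# prompt prints [1, 2, 5, 9]; equivalently:
#
#   assert selection_sort([5, 2, 9, 1]) == [1, 2, 5, 9]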
| 714
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase )
class A( UpperCamelCase ):
'''simple docstring'''
    task = field(default='''language-modeling''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    input_schema = Features({'''text''': Value('''string''' )} )
    label_schema = Features({} )
    text_column = "text"
@property
def a__ ( self : List[Any] ) -> Dict[str, str]:
"""simple docstring"""
return {self.text_column: "text"}
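# Hedged sketch of what the template above encodes (the property name
# `column_mapping` follows the upstream `datasets` task templates and is an
# assumption here):
#
#   template = A(text_column="content")
#   # template.column_mapping  ->  {"content": "text"}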
| 651
| 0
|
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field( default :List[Any]=None , metadata :Union[str, Any]=None ) -> List[Any]:
    '''simple docstring'''
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class UpperCAmelCase_ :
    models = list_field(
default=[] , metadata={
"""help""": (
"""Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"""
""" of all available models"""
)
} , )
__lowerCAmelCase : Optional[int] = list_field(
default=[8] , metadata={"""help""": """List of batch sizes for which memory and time performance will be evaluated"""} )
__lowerCAmelCase : Optional[int] = list_field(
default=[8, 32, 128, 512] , metadata={"""help""": """List of sequence lengths for which memory and time performance will be evaluated"""} , )
__lowerCAmelCase : str = field(
default=_A , metadata={"""help""": """Whether to benchmark inference of model. Inference can be disabled via --no-inference."""} , )
__lowerCAmelCase : List[str] = field(
default=_A , metadata={"""help""": """Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."""} , )
__lowerCAmelCase : Optional[int] = field(
default=_A , metadata={"""help""": """Whether to run on available tpu devices. TPU can be disabled via --no-tpu."""} )
__lowerCAmelCase : Dict = field(default=_A , metadata={"""help""": """Use FP16 to accelerate inference."""} )
__lowerCAmelCase : List[str] = field(default=_A , metadata={"""help""": """Benchmark training of model"""} )
__lowerCAmelCase : Optional[Any] = field(default=_A , metadata={"""help""": """Verbose memory tracing"""} )
__lowerCAmelCase : Dict = field(
default=_A , metadata={"""help""": """Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."""} , )
__lowerCAmelCase : Dict = field(
default=_A , metadata={
"""help""": """Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"""
} , )
__lowerCAmelCase : Union[str, Any] = field(default=_A , metadata={"""help""": """Trace memory line by line"""} )
__lowerCAmelCase : Optional[int] = field(default=_A , metadata={"""help""": """Save result to a CSV file"""} )
__lowerCAmelCase : str = field(default=_A , metadata={"""help""": """Save all print statements in a log file"""} )
__lowerCAmelCase : List[Any] = field(default=_A , metadata={"""help""": """Whether to print environment information"""} )
    multi_process = field(
default=_A , metadata={
"""help""": (
"""Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"""
""" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"""
""" for debugging / testing and on TPU."""
)
} , )
__lowerCAmelCase : Optional[int] = field(
default=F'''inference_time_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving time results to csv."""} , )
__lowerCAmelCase : Optional[Any] = field(
default=F'''inference_memory_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving memory results to csv."""} , )
__lowerCAmelCase : Tuple = field(
default=F'''train_time_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving time results to csv for training."""} , )
__lowerCAmelCase : Union[str, Any] = field(
default=F'''train_memory_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving memory results to csv for training."""} , )
__lowerCAmelCase : Any = field(
default=F'''env_info_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving environment information."""} , )
__lowerCAmelCase : Union[str, Any] = field(
default=F'''log_{round(time() )}.csv''' , metadata={"""help""": """Log filename used if print statements are saved in log."""} , )
__lowerCAmelCase : int = field(default=3 , metadata={"""help""": """Times an experiment will be run."""} )
__lowerCAmelCase : List[str] = field(
default=_A , metadata={
"""help""": (
"""Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"""
""" model weights."""
)
} , )
def __UpperCAmelCase ( self ):
"""simple docstring"""
        warnings.warn(
            f'The class {self.__class__} is deprecated. Hugging Face Benchmarking utils'
            ''' are deprecated in general and it is advised to use external Benchmarking libraries '''
            ''' to benchmark Transformer models.''' , FutureWarning , )
def __UpperCAmelCase ( self ):
"""simple docstring"""
return json.dumps(dataclasses.asdict(self ) ,indent=2 )
@property
def __UpperCAmelCase ( self ):
"""simple docstring"""
if len(self.models ) <= 0:
raise ValueError(
'''Please make sure you provide at least one model name / model identifier, *e.g.* `--models'''
''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' )
return self.models
@property
def __UpperCAmelCase ( self ):
"""simple docstring"""
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('''Multiprocessing is currently not possible on TPU.''' )
return False
else:
return True
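# Hedged usage sketch (mirrors transformers' deprecated benchmark arguments; the
# `batch_sizes` field name and the `to_json_string` method name are assumptions,
# while `models` is referenced in the properties above):
#
#   args = UpperCAmelCase_(models=["bert-base-uncased"], batch_sizes=[8])
#   print(args.to_json_string())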
| 188
|
_a : str = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_a : int = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
_a : Tuple = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 145
| 0
|
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp( self ):
        super().setUp()
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d''']
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens ):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.word_shape_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
        self.word_pronunciation_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
            json.dump(word_shape , word_shape_writer , ensure_ascii=False )
        with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
            json.dump(word_pronunciation , word_pronunciation_writer , ensure_ascii=False )
def UpperCamelCase ( self ):
A__ = self.tokenizer_class(self.vocab_file,self.word_shape_file,self.word_pronunciation_file )
A__ = tokenizer.tokenize('''你好[SEP]你是谁''' )
self.assertListEqual(__lowerCamelCase,['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ),[5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(__lowerCamelCase ),[5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(__lowerCamelCase ),[5, 6, 2, 5, 7, 8] )
def UpperCamelCase ( self ):
A__ = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ),['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def UpperCamelCase ( self ):
A__ = RoCBertBasicTokenizer(do_lower_case=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ),['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ),['''hello'''] )
def UpperCamelCase ( self ):
A__ = RoCBertBasicTokenizer(do_lower_case=__lowerCamelCase,strip_accents=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ),['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ),['''h\u00E9llo'''] )
def UpperCamelCase ( self ):
A__ = RoCBertBasicTokenizer(do_lower_case=__lowerCamelCase,strip_accents=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ),['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ),['''hello'''] )
def UpperCamelCase ( self ):
A__ = RoCBertBasicTokenizer(do_lower_case=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ),['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ),['''hello'''] )
def UpperCamelCase ( self ):
A__ = RoCBertBasicTokenizer(do_lower_case=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ),['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCamelCase ( self ):
A__ = RoCBertBasicTokenizer(do_lower_case=__lowerCamelCase,strip_accents=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ),['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCamelCase ( self ):
A__ = RoCBertBasicTokenizer(do_lower_case=__lowerCamelCase,strip_accents=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ),['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCamelCase ( self ):
A__ = RoCBertBasicTokenizer(do_lower_case=__lowerCamelCase,never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ),['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def UpperCamelCase ( self ):
A__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
A__ = {}
for i, token in enumerate(__lowerCamelCase ):
A__ = i
A__ = RoCBertWordpieceTokenizer(vocab=__lowerCamelCase,unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ),[] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ),['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ),['''[UNK]''', '''runn''', '''##ing'''] )
def UpperCamelCase ( self ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def UpperCamelCase ( self ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def UpperCamelCase ( self ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def UpperCamelCase ( self ):
A__ = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']],[['''[UNK]'''], [], ['''[UNK]''']] )
if self.test_rust_tokenizer:
A__ = self.get_rust_tokenizer()
self.assertListEqual(
                [rust_tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']],[['''[UNK]'''], [], ['''[UNK]''']] )
def UpperCamelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
A__ = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase,**__lowerCamelCase )
A__ = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
A__ = tokenizer_r.encode_plus(
__lowerCamelCase,return_attention_mask=__lowerCamelCase,return_token_type_ids=__lowerCamelCase,return_offsets_mapping=__lowerCamelCase,add_special_tokens=__lowerCamelCase,)
A__ = tokenizer_r.do_lower_case if hasattr(__lowerCamelCase,'''do_lower_case''' ) else False
A__ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results],tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results],tokens['''offset_mapping'''] )
def UpperCamelCase ( self ):
A__ = ['''的''', '''人''', '''有''']
A__ = ''''''.join(__lowerCamelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
A__ = True
A__ = self.tokenizer_class.from_pretrained(__lowerCamelCase,**__lowerCamelCase )
A__ = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase,**__lowerCamelCase )
A__ = tokenizer_p.encode(__lowerCamelCase,add_special_tokens=__lowerCamelCase )
A__ = tokenizer_r.encode(__lowerCamelCase,add_special_tokens=__lowerCamelCase )
A__ = tokenizer_r.convert_ids_to_tokens(__lowerCamelCase )
A__ = tokenizer_p.convert_ids_to_tokens(__lowerCamelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__lowerCamelCase,__lowerCamelCase )
self.assertListEqual(__lowerCamelCase,__lowerCamelCase )
A__ = False
A__ = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase,**__lowerCamelCase )
A__ = self.tokenizer_class.from_pretrained(__lowerCamelCase,**__lowerCamelCase )
A__ = tokenizer_r.encode(__lowerCamelCase,add_special_tokens=__lowerCamelCase )
A__ = tokenizer_p.encode(__lowerCamelCase,add_special_tokens=__lowerCamelCase )
A__ = tokenizer_r.convert_ids_to_tokens(__lowerCamelCase )
A__ = tokenizer_p.convert_ids_to_tokens(__lowerCamelCase )
# it is expected that only the first Chinese character is not preceded by "##".
A__ = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(__lowerCamelCase )
]
self.assertListEqual(__lowerCamelCase,__lowerCamelCase )
self.assertListEqual(__lowerCamelCase,__lowerCamelCase )
@slow
def UpperCamelCase ( self ):
A__ = self.tokenizer_class(self.vocab_file,self.word_shape_file,self.word_pronunciation_file )
A__ = tokenizer.encode('''你好''',add_special_tokens=__lowerCamelCase )
A__ = tokenizer.encode('''你是谁''',add_special_tokens=__lowerCamelCase )
A__ = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
A__ = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase,__lowerCamelCase )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def UpperCamelCase ( self ):
A__ = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
A__ = '''你好,你是谁'''
A__ = tokenizer.tokenize(__lowerCamelCase )
A__ = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
A__ = tokenizer.convert_tokens_to_shape_ids(__lowerCamelCase )
A__ = tokenizer.convert_tokens_to_pronunciation_ids(__lowerCamelCase )
A__ = tokenizer.prepare_for_model(
__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,add_special_tokens=__lowerCamelCase )
A__ = tokenizer.encode_plus(__lowerCamelCase,add_special_tokens=__lowerCamelCase )
self.assertEqual(__lowerCamelCase,__lowerCamelCase )
| 212
|
def UpperCamelCase__( n : int , k : int )->list:
    factorials = [1]
    for i in range(2 , n ):
        factorials.append(factorials[-1] * i )
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    permutation = []
    elements = list(range(n ) )
    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number , k = divmod(k , factorial )
        permutation.append(elements[number] )
        elements.remove(elements[number] )
    permutation.append(elements[0] )
    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
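# Sanity check (a sketch, matching the (n, k) signature above): the permutation
# at 0-indexed position 5 of range(4) in lexicographic order is [0, 3, 2, 1].
#
#   assert UpperCamelCase__(4, 5) == [0, 3, 2, 1]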
| 212
| 1
|
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester :
def __init__( self : List[Any] , __magic_name__ : Dict , __magic_name__ : Optional[Any]=13 , __magic_name__ : Optional[Any]=32 , __magic_name__ : Tuple=3 , __magic_name__ : Union[str, Any]=4 , __magic_name__ : Dict=[10, 20, 30, 40] , __magic_name__ : str=[2, 2, 3, 2] , __magic_name__ : Tuple=True , __magic_name__ : Any=True , __magic_name__ : Tuple=37 , __magic_name__ : int="gelu" , __magic_name__ : Union[str, Any]=10 , __magic_name__ : Optional[int]=0.02 , __magic_name__ : int=["stage2", "stage3", "stage4"] , __magic_name__ : Optional[Any]=3 , __magic_name__ : Dict=None , ) -> Tuple:
"""simple docstring"""
__snake_case : str = parent
__snake_case : List[Any] = batch_size
__snake_case : Any = image_size
__snake_case : int = num_channels
__snake_case : List[Any] = num_stages
__snake_case : Union[str, Any] = hidden_sizes
__snake_case : Dict = depths
__snake_case : Optional[int] = is_training
__snake_case : Any = use_labels
__snake_case : Union[str, Any] = intermediate_size
__snake_case : str = hidden_act
__snake_case : Dict = type_sequence_label_size
__snake_case : Any = initializer_range
__snake_case : str = out_features
__snake_case : str = num_labels
__snake_case : Dict = scope
__snake_case : Optional[int] = num_stages
def lowercase__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
def lowercase__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def lowercase__ ( self : int ) -> int:
"""simple docstring"""
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=5_12 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=__magic_name__ , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=2_56 , auxiliary_num_convs=1 , auxiliary_concat_input=__magic_name__ , loss_ignore_index=2_55 , num_labels=self.num_labels , )
def lowercase__ ( self : Optional[Any] , __magic_name__ : Tuple , __magic_name__ : Any , __magic_name__ : List[str] ) -> Optional[int]:
"""simple docstring"""
__snake_case : Optional[Any] = UperNetForSemanticSegmentation(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : int = model(__magic_name__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def lowercase__ ( self : List[str] ) -> Any:
"""simple docstring"""
        config , pixel_values , labels = self.prepare_config_and_inputs()
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowercase__: int = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
lowercase__: Dict = {'''image-segmentation''': UperNetForSemanticSegmentation} if is_torch_available() else {}
lowercase__: int = False
lowercase__: str = False
lowercase__: Optional[Any] = False
lowercase__: List[str] = False
lowercase__: str = False
lowercase__: str = False
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__snake_case : Optional[Any] = UperNetModelTester(self )
__snake_case : Union[str, Any] = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 )
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
return
def lowercase__ ( self : Tuple ) -> Dict:
"""simple docstring"""
__snake_case , __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Dict = model_class(__magic_name__ )
__snake_case : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : Any = [*signature.parameters.keys()]
__snake_case : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__magic_name__ )
@unittest.skip(reason="""UperNet does not use inputs_embeds""" )
def lowercase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason="""UperNet does not support input and output embeddings""" )
def lowercase__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def lowercase__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def lowercase__ ( self : int ) -> int:
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def lowercase__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowercase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
pass
def lowercase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
def check_hidden_states_output(__magic_name__ : Dict , __magic_name__ : int , __magic_name__ : Any ):
__snake_case : Dict = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : Union[str, Any] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
__snake_case : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__snake_case : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(__magic_name__ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : List[str] = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : Tuple = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
def lowercase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Any = _config_zero_init(__magic_name__ )
__snake_case : Union[str, Any] = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
__snake_case : Optional[int] = model_class(config=__magic_name__ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip(reason="""UperNet does not have tied weights""" )
def lowercase__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
pass
@slow
def lowercase__ ( self : List[str] ) -> int:
"""simple docstring"""
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : List[str] = UperNetForSemanticSegmentation.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def _a ( ) -> Any:
"""simple docstring"""
__snake_case : Dict = hf_hub_download(
repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" )
__snake_case : List[str] = Image.open(_lowerCamelCase ).convert("""RGB""" )
return image
@require_torch
@require_vision
@slow
class _A ( unittest.TestCase ):
def lowercase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__snake_case : Union[str, Any] = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" )
__snake_case : int = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(__magic_name__ )
__snake_case : Tuple = prepare_img()
__snake_case : int = processor(images=__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ )
with torch.no_grad():
__snake_case : Union[str, Any] = model(**__magic_name__ )
__snake_case : str = torch.Size((1, model.config.num_labels, 5_12, 5_12) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
__snake_case : List[str] = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , __magic_name__ , atol=1E-4 ) )
def lowercase__ ( self : Optional[int] ) -> str:
"""simple docstring"""
__snake_case : str = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" )
__snake_case : Any = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(__magic_name__ )
__snake_case : Union[str, Any] = prepare_img()
__snake_case : Optional[int] = processor(images=__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ )
with torch.no_grad():
__snake_case : int = model(**__magic_name__ )
__snake_case : Optional[Any] = torch.Size((1, model.config.num_labels, 5_12, 5_12) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
__snake_case : str = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , __magic_name__ , atol=1E-4 ) )
| 26
|
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class _A ( unittest.TestCase ):
    def analyze_directory( self : Optional[int] , directory : Path , identifier : Union[str, None] = None , n_identifier : Union[List[str], None] = None , ignore_files : Union[str, List[str], None] = None , only_modules : bool = True , ) -> Optional[int]:
        """simple docstring"""
        files = [file for file in os.listdir(directory ) if os.path.isfile(os.path.join(directory , file ) )]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier , list ):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append("""__init__.py""" )
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print("""Testing""" , directory / file )
            if only_modules:
                module_identifier = file.split(""".""" )[0]
                try:
                    module_identifier = getattr(transformers , module_identifier )
                    suite = doctest.DocTestSuite(module_identifier )
                    result = unittest.TextTestRunner().run(suite )
                    self.assertIs(len(result.failures ) , 0 )
                except AttributeError:
                    logger.info(f'''{module_identifier} is not a module.''' )
            else:
                result = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS )
                self.assertIs(result.failed , 0 )
    def test_modeling_doctest( self ) -> None:
        """simple docstring"""
        directory = Path("src/transformers" )
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(directory , identifier=identifier , ignore_files=ignore_files )
    def test_tokenization_doctest( self ) -> None:
        """simple docstring"""
        directory = Path("src/transformers" )
        identifier = "tokenization"
        self.analyze_directory(directory , identifier=identifier )
    def test_configuration_doctest( self ) -> None:
        """simple docstring"""
        directory = Path("src/transformers" )
        identifier = "configuration"
        self.analyze_directory(directory , identifier=identifier )
    def test_files_without_doctest( self ) -> None:
        """simple docstring"""
        directory = Path("src/transformers" )
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(directory , n_identifier=n_identifiers )
    def test_doc_sources( self ) -> None:
        """simple docstring"""
        directory = Path("docs/source" )
        ignore_files = ["favicon.ico"]
        self.analyze_directory(directory , ignore_files=ignore_files , only_modules=False )
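# Example of narrowing the sweep to one family of files (an illustrative call
# added for clarity, mirroring analyze_directory above):
#     self.analyze_directory(Path("src/transformers"), identifier="modeling_bert")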
| 26
| 1
|
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class A_ :
def __init__( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : int = 1_3 ,SCREAMING_SNAKE_CASE__ : int = 6_4 ,SCREAMING_SNAKE_CASE__ : int = 2 ,SCREAMING_SNAKE_CASE__ : int = 3 ,SCREAMING_SNAKE_CASE__ : int = 3 ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : int = 1_2_8 ,SCREAMING_SNAKE_CASE__ : Tuple=[1_6, 3_2, 6_4, 1_2_8] ,SCREAMING_SNAKE_CASE__ : int = 7 ,SCREAMING_SNAKE_CASE__ : int = 4 ,SCREAMING_SNAKE_CASE__ : int = 3_7 ,SCREAMING_SNAKE_CASE__ : str = "gelu" ,SCREAMING_SNAKE_CASE__ : float = 0.1 ,SCREAMING_SNAKE_CASE__ : float = 0.1 ,SCREAMING_SNAKE_CASE__ : int = 1_0 ,SCREAMING_SNAKE_CASE__ : float = 0.02 ,SCREAMING_SNAKE_CASE__ : int = 2 ,SCREAMING_SNAKE_CASE__ : int = 1 ,SCREAMING_SNAKE_CASE__ : int = 1_2_8 ,SCREAMING_SNAKE_CASE__ : List[int] = [2, 2, 2, 2] ,SCREAMING_SNAKE_CASE__ : int = 2 ,SCREAMING_SNAKE_CASE__ : int = 2 ,):
__lowerCamelCase : List[Any] = parent
__lowerCamelCase : List[Any] = batch_size
__lowerCamelCase : List[str] = image_size
__lowerCamelCase : int = patch_size
__lowerCamelCase : Any = num_channels
__lowerCamelCase : List[Any] = is_training
__lowerCamelCase : str = use_labels
__lowerCamelCase : Any = hidden_size
__lowerCamelCase : Any = num_hidden_layers
__lowerCamelCase : Union[str, Any] = num_attention_heads
__lowerCamelCase : str = intermediate_size
__lowerCamelCase : Union[str, Any] = hidden_act
__lowerCamelCase : Optional[Any] = hidden_dropout_prob
__lowerCamelCase : Optional[int] = attention_probs_dropout_prob
__lowerCamelCase : str = type_sequence_label_size
__lowerCamelCase : Optional[int] = initializer_range
__lowerCamelCase : List[str] = encoder_stride
__lowerCamelCase : Any = num_attention_outputs
__lowerCamelCase : Optional[Any] = embed_dim
__lowerCamelCase : int = embed_dim + 1
__lowerCamelCase : Tuple = resolution
__lowerCamelCase : Any = depths
__lowerCamelCase : Optional[int] = hidden_sizes
__lowerCamelCase : List[Any] = dim
__lowerCamelCase : List[str] = mlp_expansion_ratio
def lowerCAmelCase ( self : Optional[int]):
__lowerCamelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__lowerCamelCase : List[str] = None
if self.use_labels:
__lowerCamelCase : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size)
__lowerCamelCase : int = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase ( self : Any):
return EfficientFormerConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=SCREAMING_SNAKE_CASE__ ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,resolution=self.resolution ,depths=self.depths ,hidden_sizes=self.hidden_sizes ,dim=self.dim ,mlp_expansion_ratio=self.mlp_expansion_ratio ,)
def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : str):
__lowerCamelCase : Any = TFEfficientFormerModel(config=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : int = model(SCREAMING_SNAKE_CASE__ ,training=SCREAMING_SNAKE_CASE__)
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size))
def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : int):
__lowerCamelCase : Dict = self.type_sequence_label_size
__lowerCamelCase : int = TFEfficientFormerForImageClassification(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__ ,training=SCREAMING_SNAKE_CASE__)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size))
# test greyscale images
__lowerCamelCase : str = 1
__lowerCamelCase : Optional[Any] = TFEfficientFormerForImageClassification(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
__lowerCamelCase : Tuple = model(SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size))
def lowerCAmelCase ( self : Any):
__lowerCamelCase : int = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Union[str, Any] = config_and_inputs
__lowerCamelCase : Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class A_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase : Tuple = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
_UpperCAmelCase : str = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
_UpperCAmelCase : int = False
_UpperCAmelCase : Union[str, Any] = False
_UpperCAmelCase : Union[str, Any] = False
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Optional[Any] = False
def lowerCAmelCase ( self : Any):
__lowerCamelCase : Union[str, Any] = TFEfficientFormerModelTester(self)
__lowerCamelCase : Optional[int] = ConfigTester(
self ,config_class=SCREAMING_SNAKE_CASE__ ,has_text_modality=SCREAMING_SNAKE_CASE__ ,hidden_size=3_7)
def lowerCAmelCase ( self : Optional[int]):
self.config_tester.run_common_tests()
@unittest.skip(reason='EfficientFormer does not use inputs_embeds')
def lowerCAmelCase ( self : Union[str, Any]):
pass
@unittest.skip(reason='EfficientFormer does not support input and output embeddings')
def lowerCAmelCase ( self : Dict):
pass
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase , __lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[str] = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase : Tuple = [*signature.parameters.keys()]
__lowerCamelCase : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] ,SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Any):
def check_hidden_states_output(SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : str):
__lowerCamelCase : Optional[Any] = model_class(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : int = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) ,training=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowerCamelCase : Any = getattr(
self.model_tester ,'expected_num_hidden_layers' ,self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(SCREAMING_SNAKE_CASE__) ,SCREAMING_SNAKE_CASE__)
if hasattr(self.model_tester ,'encoder_seq_length'):
__lowerCamelCase : Dict = self.model_tester.encoder_seq_length
if hasattr(self.model_tester ,'chunk_length') and self.model_tester.chunk_length > 1:
__lowerCamelCase : int = seq_length * self.model_tester.chunk_length
else:
__lowerCamelCase : Dict = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) ,[seq_length, self.model_tester.hidden_size] ,)
if config.is_encoder_decoder:
__lowerCamelCase : List[Any] = outputs.decoder_hidden_states
                self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,(list, tuple))
self.assertEqual(len(SCREAMING_SNAKE_CASE__) ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[str] = getattr(self.model_tester ,'seq_length' ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = getattr(self.model_tester ,'decoder_seq_length' ,SCREAMING_SNAKE_CASE__)
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) ,[decoder_seq_length, self.model_tester.hidden_size] ,)
__lowerCamelCase , __lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : List[Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase : Dict = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Tuple=False):
__lowerCamelCase : Tuple = super()._prepare_for_class(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,return_labels=SCREAMING_SNAKE_CASE__)
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowerCAmelCase ( self : Optional[int]):
__lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__)
@unittest.skip(reason='EfficientFormer does not implement masked image modeling yet')
def lowerCAmelCase ( self : List[str]):
__lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : str):
__lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__)
@slow
def lowerCAmelCase ( self : Any):
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase : Union[str, Any] = TFEfficientFormerModel.from_pretrained(SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : int):
__lowerCamelCase , __lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase : Tuple = True
__lowerCamelCase : List[str] = getattr(self.model_tester ,'seq_length' ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = getattr(self.model_tester ,'encoder_seq_length' ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = getattr(self.model_tester ,'key_length' ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = getattr(self.model_tester ,'chunk_length' ,SCREAMING_SNAKE_CASE__)
if chunk_length is not None and hasattr(self.model_tester ,'num_hashes'):
__lowerCamelCase : str = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
__lowerCamelCase : List[Any] = True
__lowerCamelCase : Tuple = False
__lowerCamelCase : Dict = True
__lowerCamelCase : Tuple = model_class(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) ,training=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE__) ,self.model_tester.num_attention_outputs)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowerCamelCase : str = True
__lowerCamelCase : Any = model_class(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) ,training=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE__) ,self.model_tester.num_attention_outputs)
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:]) ,[self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] ,)
else:
self.assertListEqual(
list(attentions[0].shape[-3:]) ,[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] ,)
def lowerCAmelCase ( self : Dict):
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
__lowerCamelCase , __lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
__lowerCamelCase : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE__)
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
__lowerCamelCase : List[str] = {
key: tf.keras.Input(shape=val.shape[1:] ,dtype=val.dtype ,name=SCREAMING_SNAKE_CASE__)
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
__lowerCamelCase : str = model(SCREAMING_SNAKE_CASE__)
self.assertTrue(outputs_dict is not None)
def prepare_img() -> Optional[Any]:
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_tf
@require_vision
class A_ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase ( self : int):
return (
EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300')
if is_vision_available()
else None
)
@slow
def lowerCAmelCase ( self : Dict):
__lowerCamelCase : int = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300')
__lowerCamelCase : Union[str, Any] = self.default_image_processor
__lowerCamelCase : List[Any] = prepare_img()
__lowerCamelCase : str = image_processor(images=SCREAMING_SNAKE_CASE__ ,return_tensors='tf')
# forward pass
__lowerCamelCase : Dict = model(**SCREAMING_SNAKE_CASE__ ,training=SCREAMING_SNAKE_CASE__)
# verify the logits
__lowerCamelCase : str = tf.TensorShape((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = tf.constant([-0.0555, 0.4825, -0.0852])
self.assertTrue(np.allclose(outputs.logits[0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4))
@slow
def lowerCAmelCase ( self : str):
__lowerCamelCase : Optional[Any] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'snap-research/efficientformer-l1-300')
__lowerCamelCase : str = self.default_image_processor
__lowerCamelCase : List[Any] = prepare_img()
__lowerCamelCase : str = image_processor(images=SCREAMING_SNAKE_CASE__ ,return_tensors='tf')
# forward pass
__lowerCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE__ ,training=SCREAMING_SNAKE_CASE__)
# verify the logits
__lowerCamelCase : List[str] = tf.TensorShape((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = tf.constant([-0.1312, 0.4353, -1.0499])
self.assertTrue(np.allclose(outputs.logits[0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4))
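# These integration checks are gated behind the @slow decorator; in the
# transformers repo they are usually run with (command illustrative):
#     RUN_SLOW=1 python -m pytest tests/models/efficientformer -k "Integration"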
| 337
|
def manhattan_distance(point_a: list , point_b: list ) -> float:
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError('Both points must be in the same n-dimensional space' )
    return float(sum(abs(a - b ) for a, b in zip(point_a , point_b ) ) )
def _validate_point(point: list ) -> None:
    if point:
        if isinstance(point , list ):
            for item in point:
                if not isinstance(item , (int, float) ):
                    msg = (
                        'Expected a list of numbers as input, found '
                        F"{type(item ).__name__}"
                    )
                    raise TypeError(msg )
        else:
            msg = F"Expected a list of numbers as input, found {type(point ).__name__}"
            raise TypeError(msg )
    else:
        raise ValueError('Missing an input' )
def manhattan_distance_one_liner(point_a: list , point_b: list ) -> float:
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError('Both points must be in the same n-dimensional space' )
    return float(sum(abs(x - y ) for x, y in zip(point_a , point_b ) ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
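# Illustrative calls (added for clarity; values hand-computed):
#     manhattan_distance([1, 1], [9, 9])            -> 16.0  (|1 - 9| + |1 - 9|)
#     manhattan_distance_one_liner([1, 1], [2, 2])  -> 2.0
#     manhattan_distance(5, [1, 2])                 raises TypeError via _validate_point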
| 337
| 1
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
UpperCamelCase_ = logging.getLogger(__name__)
def lowercase__( __UpperCamelCase: str ,__UpperCamelCase: str ):
"""simple docstring"""
return (preds == labels).mean()
@dataclass
class _a :
'''simple docstring'''
A : str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
A : Optional[str] = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
A : Optional[str] = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
A : Optional[str] = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class _a :
'''simple docstring'''
A : str = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} )
A : str = field(metadata={'''help''': '''Should contain the data files for the task.'''} )
A : int = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
A : bool = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' ,datefmt='%m/%d/%Y %H:%M:%S' ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1 ) ,training_args.fpaa ,)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' ,__UpperCamelCase )
# Set seed
set_seed(training_args.seed )
try:
SCREAMING_SNAKE_CASE : Optional[Any] = processors[data_args.task_name]()
SCREAMING_SNAKE_CASE : List[Any] = processor.get_labels()
SCREAMING_SNAKE_CASE : Optional[Any] = len(__UpperCamelCase )
except KeyError:
raise ValueError('Task not found: %s' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE : Any = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=__UpperCamelCase ,finetuning_task=data_args.task_name ,cache_dir=model_args.cache_dir ,)
SCREAMING_SNAKE_CASE : Dict = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path ,from_tf=bool('.ckpt' in model_args.model_name_or_path ) ,config=__UpperCamelCase ,cache_dir=model_args.cache_dir ,)
# Get datasets
SCREAMING_SNAKE_CASE : List[str] = (
MultipleChoiceDataset(
data_dir=data_args.data_dir ,tokenizer=__UpperCamelCase ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.train ,)
if training_args.do_train
else None
)
SCREAMING_SNAKE_CASE : str = (
MultipleChoiceDataset(
data_dir=data_args.data_dir ,tokenizer=__UpperCamelCase ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.dev ,)
if training_args.do_eval
else None
)
def compute_metrics(__UpperCamelCase: EvalPrediction ) -> Dict:
SCREAMING_SNAKE_CASE : Any = np.argmax(p.predictions ,axis=1 )
return {"acc": simple_accuracy(__UpperCamelCase ,p.label_ids )}
# Data collator
SCREAMING_SNAKE_CASE : str = DataCollatorWithPadding(__UpperCamelCase ,pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
SCREAMING_SNAKE_CASE : Union[str, Any] = Trainer(
model=__UpperCamelCase ,args=__UpperCamelCase ,train_dataset=__UpperCamelCase ,eval_dataset=__UpperCamelCase ,compute_metrics=__UpperCamelCase ,data_collator=__UpperCamelCase ,)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE : List[str] = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
SCREAMING_SNAKE_CASE : Any = trainer.evaluate()
SCREAMING_SNAKE_CASE : Any = os.path.join(training_args.output_dir ,'eval_results.txt' )
if trainer.is_world_master():
with open(__UpperCamelCase ,'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' ,__UpperCamelCase ,__UpperCamelCase )
writer.write('%s = %s\n' % (key, value) )
results.update(__UpperCamelCase )
return results
def lowercase__( __UpperCamelCase: Tuple ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
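# Representative invocation (flag values are illustrative, not from this file):
#     python run_multiple_choice.py \
#         --model_name_or_path bert-base-uncased \
#         --task_name swag \
#         --data_dir ./data/swag \
#         --output_dir ./out \
#         --do_train --do_eval \
#         --max_seq_length 128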
| 28
|
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig ):
    '''simple docstring'''
    model_type = "M-CLIP"
    def __init__( self , transformerDimSize=1_024 , imageDimSize=768 , **kwargs ):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs )
class MultilingualCLIP(PreTrainedModel ):
    '''simple docstring'''
    config_class = MCLIPConfig
    def __init__( self , config , *args , **kwargs ):
        super().__init__(config , *args , **kwargs )
        self.transformer = XLMRobertaModel(config )
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions , out_features=config.numDims )
    def forward( self , input_ids , attention_mask ):
        embs = self.transformer(input_ids=input_ids , attention_mask=attention_mask )[0]
        pooled = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
        return self.LinearTransformation(pooled ), embs
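# Minimal usage sketch (added for clarity; the checkpoint id is an assumption,
# not taken from this file):
#     model = MultilingualCLIP.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-L-14")
#     projected, token_embs = model(input_ids, attention_mask)
# forward() mean-pools the XLM-R token embeddings under the attention mask and
# projects the pooled vector into the CLIP image-embedding space.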
| 31
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
lowercase__ : int = logging.get_logger(__name__)
lowercase__ : Optional[int] = {
"microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = '''layoutlmv3'''
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Tuple=5_0265 , UpperCAmelCase__ : Union[str, Any]=768 , UpperCAmelCase__ : List[Any]=12 , UpperCAmelCase__ : Union[str, Any]=12 , UpperCAmelCase__ : Optional[Any]=3072 , UpperCAmelCase__ : Optional[Any]="gelu" , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : Any=512 , UpperCAmelCase__ : int=2 , UpperCAmelCase__ : Dict=0.02 , UpperCAmelCase__ : List[str]=1e-5 , UpperCAmelCase__ : Any=1 , UpperCAmelCase__ : Tuple=0 , UpperCAmelCase__ : str=2 , UpperCAmelCase__ : List[str]=1024 , UpperCAmelCase__ : str=128 , UpperCAmelCase__ : List[Any]=128 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Union[str, Any]=32 , UpperCAmelCase__ : List[Any]=128 , UpperCAmelCase__ : str=64 , UpperCAmelCase__ : Optional[Any]=256 , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Union[str, Any]=224 , UpperCAmelCase__ : Tuple=3 , UpperCAmelCase__ : Optional[Any]=16 , UpperCAmelCase__ : Union[str, Any]=None , **UpperCAmelCase__ : Any , ) ->Optional[Any]:
super().__init__(
vocab_size=UpperCAmelCase__ , hidden_size=UpperCAmelCase__ , num_hidden_layers=UpperCAmelCase__ , num_attention_heads=UpperCAmelCase__ , intermediate_size=UpperCAmelCase__ , hidden_act=UpperCAmelCase__ , hidden_dropout_prob=UpperCAmelCase__ , attention_probs_dropout_prob=UpperCAmelCase__ , max_position_embeddings=UpperCAmelCase__ , type_vocab_size=UpperCAmelCase__ , initializer_range=UpperCAmelCase__ , layer_norm_eps=UpperCAmelCase__ , pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
UpperCAmelCase_ = max_ad_position_embeddings
UpperCAmelCase_ = coordinate_size
UpperCAmelCase_ = shape_size
UpperCAmelCase_ = has_relative_attention_bias
UpperCAmelCase_ = rel_pos_bins
UpperCAmelCase_ = max_rel_pos
UpperCAmelCase_ = has_spatial_attention_bias
UpperCAmelCase_ = rel_ad_pos_bins
UpperCAmelCase_ = max_rel_ad_pos
UpperCAmelCase_ = text_embed
UpperCAmelCase_ = visual_embed
UpperCAmelCase_ = input_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = classifier_dropout
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = version.parse('''1.12''' )
@property
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def lowerCAmelCase__ ( self : List[Any] ) ->float:
return 1e-5
@property
def lowerCAmelCase__ ( self : List[str] ) ->int:
return 12
def lowerCAmelCase__ ( self : str , UpperCAmelCase__ : "ProcessorMixin" , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Optional["TensorType"] = None , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 40 , UpperCAmelCase__ : int = 40 , ) ->Mapping[str, Any]:
setattr(processor.image_processor , '''apply_ocr''' , UpperCAmelCase__ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase_ = compute_effective_axis_dimension(
UpperCAmelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase_ = processor.tokenizer.num_special_tokens_to_add(UpperCAmelCase__ )
UpperCAmelCase_ = compute_effective_axis_dimension(
UpperCAmelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCAmelCase__ )
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase_ = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
UpperCAmelCase_ = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
UpperCAmelCase_ = self._generate_dummy_images(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
UpperCAmelCase_ = dict(
processor(
UpperCAmelCase__ , text=UpperCAmelCase__ , boxes=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , ) )
return inputs
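# Sketch of consuming this config for export (the upstream class name
# LayoutLMv3OnnxConfig and the opset choice are assumptions; transformers.onnx
# provides the export helper used here):
#     from pathlib import Path
#     from transformers.onnx import export
#     onnx_config = LayoutLMv3OnnxConfig(model.config, task="question-answering")
#     export(preprocessor=processor, model=model, config=onnx_config,
#            opset=12, output=Path("layoutlmv3.onnx"))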
| 43
|
'''simple docstring'''
INSTALL_CONTENT = "\n# How to install Transformers\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 43
| 1
|
'''simple docstring'''
def decimal_isolate(number: float , digit_amount: int )-> float:
    if digit_amount > 0:
        return round(number - int(number ) , digit_amount )
    return number - int(number )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.3_45, 1))
print(decimal_isolate(35.3_45, 2))
print(decimal_isolate(35.3_45, 3))
print(decimal_isolate(-14.7_89, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.1_23, 1))
print(decimal_isolate(-14.1_23, 2))
print(decimal_isolate(-14.1_23, 3))
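# Expected console output of the calls above (hand-computed; the two-decimal
# cases reflect standard float rounding):
#     0.53, 0.3, 0.34, 0.345, -0.789, 0, -0.1, -0.12, -0.123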
| 301
|
'''simple docstring'''
def is_pentagonal(n: int )-> bool:
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0
def solution(limit: int = 5000 )-> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1 , limit )]
    for i, pentagonal_i in enumerate(pentagonal_nums ):
        for j in range(i , len(pentagonal_nums ) ):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a ) and is_pentagonal(b ):
                return b
    return -1
if __name__ == "__main__":
print(f'{solution() = }')
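# Why is_pentagonal works (note added for clarity): P(k) = k(3k - 1) / 2, so
# solving 3k^2 - k - 2n = 0 gives k = (1 + sqrt(1 + 24n)) / 6, and n is
# pentagonal exactly when that k is a whole number. With the default limit of
# 5000 the search returns 5482660, the accepted Project Euler #44 answer.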
| 301
| 1
|
cache: dict[tuple[int, int, int], int] = {}
def _calculate(days: int , absent: int , late: int ) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1 , absent , late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1 , absent + 1 , 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1 , absent , 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings
def solution(days: int = 30) -> int:
    return _calculate(days , absent=0 , late=0)
if __name__ == "__main__":
print(solution())
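# Hand-checked sanity value (not from the original file): for 4-day strings
# there are 43 valid prize strings -- 13 with no absence and no run of three
# lates, plus 30 containing exactly one absence.
#     assert _calculate(4, absent=0, late=0) == 43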
| 618
|
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
snake_case__ : Any = logging.get_logger(__name__)
@dataclass
class _a :
"""simple docstring"""
A_ = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(glue_processors.keys() )} )
A_ = field(
metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
A_ = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
A_ = field(
default=UpperCAmelCase__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = self.task_name.lower()
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
A_ = """train"""
A_ = """dev"""
A_ = """test"""
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
A_ = 42
A_ = 42
A_ = 42
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = Split.train , _UpperCAmelCase = None , ) -> Tuple:
warnings.warn(
'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , _UpperCAmelCase , )
UpperCamelCase_ = args
UpperCamelCase_ = glue_processors[args.task_name]()
UpperCamelCase_ = glue_output_modes[args.task_name]
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
try:
UpperCamelCase_ = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name' )
# Load data features from cache or dataset file
UpperCamelCase_ = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
UpperCamelCase_ = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCamelCase_ , UpperCamelCase_ = label_list[2], label_list[1]
UpperCamelCase_ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCamelCase_ = cached_features_file + '.lock'
with FileLock(_UpperCAmelCase ):
if os.path.exists(_UpperCAmelCase ) and not args.overwrite_cache:
UpperCamelCase_ = time.time()
UpperCamelCase_ = torch.load(_UpperCAmelCase )
logger.info(
f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
else:
logger.info(f"""Creating features from dataset file at {args.data_dir}""" )
if mode == Split.dev:
UpperCamelCase_ = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
UpperCamelCase_ = self.processor.get_test_examples(args.data_dir )
else:
UpperCamelCase_ = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
UpperCamelCase_ = examples[:limit_length]
UpperCamelCase_ = glue_convert_examples_to_features(
_UpperCAmelCase , _UpperCAmelCase , max_length=args.max_seq_length , label_list=_UpperCAmelCase , output_mode=self.output_mode , )
UpperCamelCase_ = time.time()
torch.save(self.features , _UpperCAmelCase )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self ) -> List[str]:
return len(self.features )
def __getitem__( self , _UpperCAmelCase ) -> InputFeatures:
return self.features[i]
def _UpperCAmelCase ( self ) -> Tuple:
return self.label_list
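# Illustrative construction (class names follow the upstream transformers GLUE
# dataset API; the tokenizer and paths are assumptions, not from this file):
#     args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
#     dataset = GlueDataset(args, tokenizer=tokenizer, mode=Split.dev)
#     first_example = dataset[0]   # an InputFeatures instance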
| 618
| 1
|
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
__UpperCamelCase = logging.getLogger(__name__)
class lowerCamelCase__ ( UpperCAmelCase ):
"""simple docstring"""
_UpperCamelCase : List[str] = 'summarization'
_UpperCamelCase : List[str] = ['loss']
_UpperCamelCase : Tuple = ROUGE_KEYS
_UpperCamelCase : Any = 'rouge2'
def __init__( self , snake_case , **snake_case ):
'''simple docstring'''
if hparams.sortish_sampler and hparams.gpus > 1:
UpperCamelCase__ = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training" )
if hparams.sortish_sampler:
raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously" )
super().__init__(__lowerCAmelCase , num_labels=__lowerCAmelCase , mode=self.mode , **__lowerCAmelCase )
use_task_specific_params(self.model , "summarization" )
save_git_info(self.hparams.output_dir )
UpperCamelCase__ = Path(self.output_dir ) / 'metrics.json'
UpperCamelCase__ = Path(self.output_dir ) / 'hparams.pkl'
pickle_save(self.hparams , self.hparams_save_path )
UpperCamelCase__ = 0
UpperCamelCase__ = defaultdict(__lowerCAmelCase )
UpperCamelCase__ = self.config.model_type
UpperCamelCase__ = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size
UpperCamelCase__ = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
UpperCamelCase__ = {
'train': self.hparams.n_train,
'val': self.hparams.n_val,
'test': self.hparams.n_test,
}
UpperCamelCase__ = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
UpperCamelCase__ = {
'train': self.hparams.max_target_length,
'val': self.hparams.val_max_target_length,
'test': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
UpperCamelCase__ = get_git_info()['repo_sha']
UpperCamelCase__ = hparams.num_workers
UpperCamelCase__ = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , __lowerCAmelCase ):
UpperCamelCase__ = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
UpperCamelCase__ = self.decoder_start_token_id
UpperCamelCase__ = (
SeqaSeqDataset if hasattr(self.tokenizer , "prepare_seq2seq_batch" ) else LegacySeqaSeqDataset
)
UpperCamelCase__ = False
UpperCamelCase__ = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
UpperCamelCase__ = self.hparams.eval_max_gen_length
else:
UpperCamelCase__ = self.model.config.max_length
UpperCamelCase__ = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def snake_case__ ( self , snake_case ):
'''simple docstring'''
UpperCamelCase__ = {
k: self.tokenizer.batch_decode(v.tolist() ) if 'mask' not in k else v.shape for k, v in batch.items()
}
save_json(__lowerCAmelCase , Path(self.output_dir ) / "text_batch.json" )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / "tok_batch.json" )
UpperCamelCase__ = True
return readable_batch
def snake_case__ ( self , snake_case , **snake_case ):
'''simple docstring'''
return self.model(__lowerCAmelCase , **__lowerCAmelCase )
def snake_case__ ( self , snake_case ):
'''simple docstring'''
UpperCamelCase__ = self.tokenizer.batch_decode(
__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase )
return lmap(str.strip , __lowerCAmelCase )
def snake_case__ ( self , snake_case ):
'''simple docstring'''
UpperCamelCase__ = self.tokenizer.pad_token_id
UpperCamelCase__ = batch['input_ids'], batch['attention_mask']
UpperCamelCase__ = batch['labels']
if isinstance(self.model , __lowerCAmelCase ):
UpperCamelCase__ = self.model._shift_right(__lowerCAmelCase )
else:
UpperCamelCase__ = shift_tokens_right(__lowerCAmelCase , __lowerCAmelCase )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
UpperCamelCase__ = decoder_input_ids
self.save_readable_batch(__lowerCAmelCase )
UpperCamelCase__ = self(__lowerCAmelCase , attention_mask=__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase , use_cache=__lowerCAmelCase )
UpperCamelCase__ = outputs['logits']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
UpperCamelCase__ = nn.CrossEntropyLoss(ignore_index=__lowerCAmelCase )
assert lm_logits.shape[-1] == self.vocab_size
UpperCamelCase__ = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
UpperCamelCase__ = nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )
UpperCamelCase__ = label_smoothed_nll_loss(
__lowerCAmelCase , __lowerCAmelCase , self.hparams.label_smoothing , ignore_index=__lowerCAmelCase )
return (loss,)
@property
def snake_case__ ( self ):
'''simple docstring'''
return self.tokenizer.pad_token_id
def snake_case__ ( self , snake_case , snake_case ):
'''simple docstring'''
UpperCamelCase__ = self._step(__lowerCAmelCase )
UpperCamelCase__ = dict(zip(self.loss_names , __lowerCAmelCase ) )
# tokens per batch
UpperCamelCase__ = batch['input_ids'].ne(self.pad ).sum() + batch['labels'].ne(self.pad ).sum()
UpperCamelCase__ = batch['input_ids'].shape[0]
UpperCamelCase__ = batch['input_ids'].eq(self.pad ).sum()
UpperCamelCase__ = batch['input_ids'].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def snake_case__ ( self , snake_case , snake_case ):
'''simple docstring'''
return self._generative_step(__lowerCAmelCase )
def snake_case__ ( self , snake_case , snake_case="val" ):
'''simple docstring'''
self.step_count += 1
UpperCamelCase__ = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
UpperCamelCase__ = losses['loss']
UpperCamelCase__ = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['gen_time', 'gen_len']
}
UpperCamelCase__ = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
UpperCamelCase__ = torch.tensor(__lowerCAmelCase ).type_as(__lowerCAmelCase )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(__lowerCAmelCase )
UpperCamelCase__ = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()}
UpperCamelCase__ = self.step_count
self.metrics[prefix].append(__lowerCAmelCase ) # callback writes this to self.metrics_save_path
UpperCamelCase__ = flatten_list([x["preds"] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'''{prefix}_loss''': loss,
F'''{prefix}_{self.val_metric}''': metric_tensor,
}
def snake_case__ ( self , snake_case , snake_case ):
'''simple docstring'''
return calculate_rouge(__lowerCAmelCase , __lowerCAmelCase )
def snake_case__ ( self , snake_case ):
'''simple docstring'''
UpperCamelCase__ = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
UpperCamelCase__ = self.model.generate(
batch["input_ids"] , attention_mask=batch["attention_mask"] , use_cache=__lowerCAmelCase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
UpperCamelCase__ = (time.time() - ta) / batch['input_ids'].shape[0]
UpperCamelCase__ = self.ids_to_clean_text(__lowerCAmelCase )
UpperCamelCase__ = self.ids_to_clean_text(batch["labels"] )
UpperCamelCase__ = self._step(__lowerCAmelCase )
UpperCamelCase__ = dict(zip(self.loss_names , __lowerCAmelCase ) )
UpperCamelCase__ = self.calc_generative_metrics(__lowerCAmelCase , __lowerCAmelCase )
UpperCamelCase__ = np.mean(lmap(__lowerCAmelCase , __lowerCAmelCase ) )
base_metrics.update(gen_time=__lowerCAmelCase , gen_len=__lowerCAmelCase , preds=__lowerCAmelCase , target=__lowerCAmelCase , **__lowerCAmelCase )
return base_metrics
def snake_case__ ( self , snake_case , snake_case ):
'''simple docstring'''
return self._generative_step(__lowerCAmelCase )
def snake_case__ ( self , snake_case ):
'''simple docstring'''
return self.validation_epoch_end(__lowerCAmelCase , prefix="test" )
def snake_case__ ( self , snake_case ):
'''simple docstring'''
UpperCamelCase__ = self.n_obs[type_path]
UpperCamelCase__ = self.target_lens[type_path]
UpperCamelCase__ = self.dataset_class(
self.tokenizer , type_path=__lowerCAmelCase , n_obs=__lowerCAmelCase , max_target_length=__lowerCAmelCase , **self.dataset_kwargs , )
return dataset
def snake_case__ ( self , snake_case , snake_case , snake_case = False ):
'''simple docstring'''
UpperCamelCase__ = self.get_dataset(__lowerCAmelCase )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
UpperCamelCase__ = dataset.make_sortish_sampler(__lowerCAmelCase , distributed=self.hparams.gpus > 1 )
return DataLoader(
__lowerCAmelCase , batch_size=__lowerCAmelCase , collate_fn=dataset.collate_fn , shuffle=__lowerCAmelCase , num_workers=self.num_workers , sampler=__lowerCAmelCase , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
UpperCamelCase__ = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
__lowerCAmelCase , batch_sampler=__lowerCAmelCase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
__lowerCAmelCase , batch_size=__lowerCAmelCase , collate_fn=dataset.collate_fn , shuffle=__lowerCAmelCase , num_workers=self.num_workers , sampler=__lowerCAmelCase , )
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = self.get_dataloader("train" , batch_size=self.hparams.train_batch_size , shuffle=__lowerCAmelCase )
return dataloader
def snake_case__ ( self ):
'''simple docstring'''
return self.get_dataloader("val" , batch_size=self.hparams.eval_batch_size )
def snake_case__ ( self ):
'''simple docstring'''
return self.get_dataloader("test" , batch_size=self.hparams.eval_batch_size )
@staticmethod
def snake_case__ ( snake_case , snake_case ):
'''simple docstring'''
BaseTransformer.add_model_specific_args(__lowerCAmelCase , __lowerCAmelCase )
add_generic_args(__lowerCAmelCase , __lowerCAmelCase )
parser.add_argument(
"--max_source_length" , default=1024 , type=__lowerCAmelCase , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--max_target_length" , default=56 , type=__lowerCAmelCase , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--val_max_target_length" , default=142 , type=__lowerCAmelCase , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--test_max_target_length" , default=142 , type=__lowerCAmelCase , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument("--freeze_encoder" , action="store_true" )
parser.add_argument("--freeze_embeds" , action="store_true" )
parser.add_argument("--sortish_sampler" , action="store_true" , default=__lowerCAmelCase )
parser.add_argument("--overwrite_output_dir" , action="store_true" , default=__lowerCAmelCase )
parser.add_argument("--max_tokens_per_batch" , type=__lowerCAmelCase , default=__lowerCAmelCase )
parser.add_argument("--logger_name" , type=__lowerCAmelCase , choices=["default", "wandb", "wandb_shared"] , default="default" )
parser.add_argument("--n_train" , type=__lowerCAmelCase , default=-1 , required=__lowerCAmelCase , help="# examples. -1 means use all." )
parser.add_argument("--n_val" , type=__lowerCAmelCase , default=500 , required=__lowerCAmelCase , help="# examples. -1 means use all." )
parser.add_argument("--n_test" , type=__lowerCAmelCase , default=-1 , required=__lowerCAmelCase , help="# examples. -1 means use all." )
parser.add_argument(
"--task" , type=__lowerCAmelCase , default="summarization" , required=__lowerCAmelCase , help="# examples. -1 means use all." )
parser.add_argument("--label_smoothing" , type=__lowerCAmelCase , default=0.0 , required=__lowerCAmelCase )
parser.add_argument("--src_lang" , type=__lowerCAmelCase , default="" , required=__lowerCAmelCase )
parser.add_argument("--tgt_lang" , type=__lowerCAmelCase , default="" , required=__lowerCAmelCase )
parser.add_argument("--eval_beams" , type=__lowerCAmelCase , default=__lowerCAmelCase , required=__lowerCAmelCase )
parser.add_argument(
"--val_metric" , type=__lowerCAmelCase , default=__lowerCAmelCase , required=__lowerCAmelCase , choices=["bleu", "rouge2", "loss", None] )
parser.add_argument("--eval_max_gen_length" , type=__lowerCAmelCase , default=__lowerCAmelCase , help="never generate more than n tokens" )
parser.add_argument("--save_top_k" , type=__lowerCAmelCase , default=1 , required=__lowerCAmelCase , help="How many checkpoints to save" )
parser.add_argument(
"--early_stopping_patience" , type=__lowerCAmelCase , default=-1 , required=__lowerCAmelCase , help=(
"-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
" val_check_interval will effect it."
) , )
return parser
class lowerCamelCase__ ( UpperCAmelCase ):
"""simple docstring"""
_UpperCamelCase : Optional[Any] = 'translation'
_UpperCamelCase : str = ['loss']
_UpperCamelCase : Tuple = ['bleu']
_UpperCamelCase : Union[str, Any] = 'bleu'
def __init__( self , snake_case , **snake_case ):
'''simple docstring'''
super().__init__(__lowerCAmelCase , **__lowerCAmelCase )
UpperCamelCase__ = hparams.src_lang
UpperCamelCase__ = hparams.tgt_lang
def snake_case__ ( self , snake_case , snake_case ):
'''simple docstring'''
return calculate_bleu(__lowerCAmelCase , __lowerCAmelCase )
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=Seq2SeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    main(args)
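# Illustrative invocation (a sketch, not part of the script): the paths are hypothetical,
# and the generic data/output flags are assumed to come from the shared lightning_base
# argument parser; only the flags added by add_model_specific_args above are defined here.
#   python finetune.py --data_dir ./wmt_en_ro --output_dir ./out --task translation \
#       --src_lang en --tgt_lang ro --logger_name default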
| 551
|
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # Dummy iterable dataset: yields an increasing count and stops at a random point (or at max_length).
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
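# Because the stream is driven by the `random` module, seeding before iteration makes it
# reproducible; a minimal sketch of the property the tests below rely on:
#   random.seed(42); a = list(RandomIterableDataset())
#   random.seed(42); b = list(RandomIterableDataset())
#   assert a == b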
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        # DataLoaderDispatcher needs an initialized accelerator state.
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
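# Minimal sketch of the round-robin semantics exercised above (illustrative, not part of
# the test suite): a BatchSampler over range(6) with batch_size=2 yields [0, 1], [2, 3],
# [4, 5]; split across 2 shards, shard 0 sees [[0, 1], [4, 5]] and shard 1 sees
# [[2, 3], [0, 1]], wrapping back to the start of the dataset to keep batch counts even.
#   sampler = BatchSampler(range(6), batch_size=2, drop_last=False)
#   shards = [BatchSamplerShard(sampler, 2, i) for i in range(2)]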
| 345
| 0
|
"""simple docstring"""
def A_ ( _lowerCAmelCase : str = "The quick brown fox jumps over the lazy dog", ):
"""simple docstring"""
_a = set()
# Replace all the whitespace in our sentence
_a = input_str.replace(''' ''', '''''' )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(_lowerCAmelCase ) == 26
def A_ ( _lowerCAmelCase : str = "The quick brown fox jumps over the lazy dog", ):
"""simple docstring"""
_a = [False] * 26
for char in input_str:
if char.islower():
_a = True
elif char.isupper():
_a = True
return all(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : str = "The quick brown fox jumps over the lazy dog", ):
"""simple docstring"""
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def A_ ( ):
"""simple docstring"""
from timeit import timeit
_a = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
print(timeit('''is_pangram()''', setup=_lowerCAmelCase ) )
print(timeit('''is_pangram_faster()''', setup=_lowerCAmelCase ) )
print(timeit('''is_pangram_fastest()''', setup=_lowerCAmelCase ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 285
|
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
class ByT5Tokenizer(PreTrainedTokenizer):
    """
    Construct a ByT5 tokenizer. ByT5 simply uses raw UTF-8 bytes, so no vocab file is needed.
    """

    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=125,
        additional_special_tokens=None,
        **kwargs,
    ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens"
                )

        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits

        # define special tokens dict
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def vocab_size(self) -> int:
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        """Do not add eos again if the user has already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (thus one character per byte)."""
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    # ByT5Tokenizer has no vocab file
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        return ()
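# Round-trip sketch (illustrative): each character becomes one token per UTF-8 byte, and
# byte ids are offset by the 3 special tokens (pad/eos/unk) defined above:
#   tok = ByT5Tokenizer()
#   tok._tokenize("hi")                                          # -> ["h", "i"]
#   [tok._convert_token_to_id(t) for t in tok._tokenize("hi")]   # -> [107, 108]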
| 285
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
lowercase_ = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
FAIRSEQ_LANGUAGE_CODES = lowercase_  # give the language-code list above its intended name
# fmt: on


class NllbTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |   7  |  8   |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting.
        - In legacy mode: no prefix and suffix=[eos, src_lang_code].
        - In default mode: prefix=[src_lang_code], suffix=[eos].
        """
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting.
        - In legacy mode: no prefix and suffix=[eos, tgt_lang_code].
        - In default mode: prefix=[tgt_lang_code], suffix=[eos].
        """
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
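# Usage sketch (illustrative; needs a real SentencePiece model file on disk):
#   tok = NllbTokenizer("sentencepiece.bpe.model", src_lang="eng_Latn", tgt_lang="fra_Latn")
#   enc = tok("Hello world", return_tensors="pt")
# In the default (non-legacy) mode the input ids begin with the eng_Latn code and end with </s>.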
| 235
|
"""simple docstring"""
def _lowercase ( __snake_case ,__snake_case ) -> int:
return int((input_a, input_a).count(1 ) != 0 )
def _lowercase ( ) -> None:
assert or_gate(0 ,0 ) == 0
assert or_gate(0 ,1 ) == 1
assert or_gate(1 ,0 ) == 1
assert or_gate(1 ,1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 293
| 0
|
def gray_code(bit_count: int) -> list:
    """
    Generate the sequence of binary-reflected Gray codes for the given bit count.

    >>> gray_code(2)
    [0, 1, 3, 2]
    """
    if bit_count < 0:
        raise ValueError("The given input must be non-negative")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Recursively build the Gray code sequence as binary strings."""
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])

    return sequence
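# A well-known closed form for the i-th binary-reflected Gray code is i ^ (i >> 1);
# a minimal cross-check against the recursive construction above (illustrative helper,
# not part of the original module):
def gray_code_xor(bit_count: int) -> list:
    return [i ^ (i >> 1) for i in range(1 << bit_count)]
# e.g. gray_code(4) == gray_code_xor(4)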
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707
|
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """
    Implements a batch, differentiable, standard pinhole camera.
    """

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        """Per-pixel (x, y) integer coordinates, flattened to shape (height * width, 2)."""
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        # Map pixel coordinates to [-1, 1] normalized device coordinates, then scale by the FOV.
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """
        Creates a new camera for the resized view assuming the aspect ratio does not change.
        """
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            shape=self.shape,  # `shape` has no default, so it must be carried over explicitly
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    """Build a ring of 20 cameras panning around the origin, looking slightly downward."""
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
| 641
| 0
|
def solution(n: int = 100) -> int:
    """
    Returns the square of the sum of the first n natural numbers minus the
    sum of their squares.

    >>> solution(10)
    2640
    """
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F'{solution() = }')
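# Closed-form cross-check (illustrative): with S = n(n+1)/2 and Q = n(n+1)(2n+1)/6,
# the answer is S**2 - Q; for n = 100 this gives 25164150.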
| 100
|
"""Tokenization class for ByT5: text is tokenized into raw UTF-8 bytes."""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
class ByT5Tokenizer(PreTrainedTokenizer):
    """
    Construct a ByT5 tokenizer. ByT5 simply uses raw UTF-8 bytes, so no vocab file is needed.
    """

    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=125,
        additional_special_tokens=None,
        **kwargs,
    ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens"
                )

        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits

        # define special tokens dict
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def vocab_size(self) -> int:
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        """Do not add eos again if the user has already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (thus one character per byte)."""
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    # ByT5Tokenizer has no vocab file
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        return ()
| 448
| 0
|
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process under shortest-remaining-time-first scheduling."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    ready_process: list[int] = []
    completed = 0
    total_time = 0

    # When processes are not completed,
    # a process whose arrival time has passed
    # and that has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Calculate the turnaround time of each process: burst time plus waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


if __name__ == "__main__":
    print("[TEST CASE 01]")

    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(burst_time, no_of_processes, waiting_time)
    # Printing the Result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
            f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
        )
    print(f"\nAverage waiting time = {mean(waiting_time):.5f}")
    print(f"Average turnaround time = {mean(turn_around_time):.5f}")
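# Note on the design: each simulated time step rescans all processes, so the loop above is
# roughly O(total burst time * n). That is fine for toy inputs like this one; a min-heap
# keyed on remaining time would scale better for large process counts.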
| 702
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            torchscript=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            fp16=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            fp16=True,
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
'''simple docstring'''
_lowerCAmelCase : Tuple = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , save_to_csv=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_UpperCAmelCase , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(_UpperCAmelCase , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(_UpperCAmelCase , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(_UpperCAmelCase , """train_time.csv""" ) , env_info_csv_file=os.path.join(_UpperCAmelCase , """env.csv""" ) , multi_process=_UpperCAmelCase , )
_lowerCAmelCase : List[Any] = PyTorchBenchmark(_UpperCAmelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(_UpperCAmelCase , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_UpperCAmelCase , """train_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_UpperCAmelCase , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_UpperCAmelCase , """train_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_UpperCAmelCase , """env.csv""" ) ).exists() )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
_lowerCAmelCase : str = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(_UpperCAmelCase : int ):
self.assertTrue(hasattr(_UpperCAmelCase , """sequential""" ) )
self.assertTrue(hasattr(_UpperCAmelCase , """cumulative""" ) )
self.assertTrue(hasattr(_UpperCAmelCase , """current""" ) )
self.assertTrue(hasattr(_UpperCAmelCase , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_UpperCAmelCase , """log.txt""" ) , log_print=_UpperCAmelCase , trace_memory_line_by_line=_UpperCAmelCase , multi_process=_UpperCAmelCase , )
_lowerCAmelCase : str = PyTorchBenchmark(_UpperCAmelCase )
_lowerCAmelCase : Any = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(_UpperCAmelCase , """log.txt""" ) ).exists() )
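

if __name__ == "__main__":
    # Editor's addition (illustrative, not part of the original test file): a
    # minimal standalone run of the benchmark utilities the tests above
    # exercise. The checkpoint id and the size values are arbitrary examples.
    from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

    demo_args = PyTorchBenchmarkArguments(
        models=["sshleifer/tiny-gpt2"],
        training=False,
        inference=True,
        sequence_lengths=[8],
        batch_sizes=[1],
        multi_process=False,
    )
    print(PyTorchBenchmark(demo_args).run())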
| 196
| 0
|
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
MAPPING_ENCODER = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
MAPPING_ENCODER_48K = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
MAPPING_DECODER = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
MAPPING_DECODER_48K = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    if model_name in ["encodec_24khz", "encodec_32khz"]:
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(
    model_name,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
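
# Example invocation (editor's note: an illustrative sketch; the script filename
# and the local paths are placeholders, only the flags defined above are real):
#
#     python convert_encodec_checkpoint_to_pytorch.py \
#         --model encodec_24khz \
#         --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#         --pytorch_dump_folder_path ./encodec_24khz_converted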
| 503
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on
class MBartTokenizer(PreTrainedTokenizer):
    """
    Construct an MBART tokenizer, based on SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, tokenizer_file=tokenizer_file,
            src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline, to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting. No prefix and suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
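

if __name__ == "__main__":
    # Editor's addition (illustrative, not part of the original module): a tiny
    # round trip with the tokenizer defined above. The checkpoint id is a real
    # hub repository; the sentence is an arbitrary example.
    tokenizer = MBartTokenizer.from_pretrained(
        "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
    )
    encoded = tokenizer("UN Chief Says There Is No Plan to Stop War in Syria")
    print(encoded["input_ids"])
    print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))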
| 503
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    """Configuration class to store the configuration of a ViViT model."""

    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
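

if __name__ == "__main__":
    # Editor's addition (illustrative): constructing the config defined above,
    # once with defaults and once with arbitrary example overrides.
    config = VivitConfig()
    small = VivitConfig(num_hidden_layers=4, hidden_size=256)
    print(config.model_type, config.hidden_size, small.num_hidden_layers)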
| 712
|
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'tester'
        output_text = 'tester'
        return input_text, output_text
    @unittest.skip('MGP-STR always lower cases letters.')
    def test_added_tokens_do_lower_case(self):
        pass
    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}"""):
                special_token = '[SPECIAL_TOKEN]'

                tokenizer.add_special_tokens({'cls_token': special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}"""):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(' ', ''), output_text)
    @unittest.skip('MGP-STR tokenizer only handles one sequence.')
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer')
    def test_pretokenized_inputs(self):
        pass
| 123
| 0
|
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
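
# Example invocation (editor's note: illustrative only; the flags mirror the
# TensorFlowBenchmarkArguments fields via HfArgumentParser's CLI mapping, the
# script filename and model id are arbitrary examples):
#
#     python run_benchmark_tf.py --models sshleifer/tiny-gpt2 \
#         --sequence_lengths 8 --batch_sizes 1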
| 41
|
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar('''KEY''')
VAL = TypeVar('''VAL''')


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """Set the value in the bucket if it is empty or holds the same key;
        return False if the bucket is occupied by a different key."""
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = ", ".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
| 41
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'''
),
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'''
),
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''',
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'''
),
'''bert-base-multilingual-cased''': (
'''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-cased''': (
'''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''bert-base-uncased''': 512,
'''bert-large-uncased''': 512,
'''bert-base-cased''': 512,
'''bert-large-cased''': 512,
'''bert-base-multilingual-uncased''': 512,
'''bert-base-multilingual-cased''': 512,
'''bert-base-chinese''': 512,
'''bert-base-german-cased''': 512,
'''bert-large-uncased-whole-word-masking''': 512,
'''bert-large-cased-whole-word-masking''': 512,
'''bert-large-uncased-whole-word-masking-finetuned-squad''': 512,
'''bert-large-cased-whole-word-masking-finetuned-squad''': 512,
'''bert-base-cased-finetuned-mrpc''': 512,
'''bert-base-german-dbmdz-cased''': 512,
'''bert-base-german-dbmdz-uncased''': 512,
'''TurkuNLP/bert-base-finnish-cased-v1''': 512,
'''TurkuNLP/bert-base-finnish-uncased-v1''': 512,
'''wietsedv/bert-base-dutch-cased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''bert-base-uncased''': {'''do_lower_case''': True},
'''bert-large-uncased''': {'''do_lower_case''': True},
'''bert-base-cased''': {'''do_lower_case''': False},
'''bert-large-cased''': {'''do_lower_case''': False},
'''bert-base-multilingual-uncased''': {'''do_lower_case''': True},
'''bert-base-multilingual-cased''': {'''do_lower_case''': False},
'''bert-base-chinese''': {'''do_lower_case''': False},
'''bert-base-german-cased''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False},
'''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True},
'''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False},
'''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True},
'''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" BERT tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
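

if __name__ == "__main__":
    # Editor's addition (illustrative, not part of the original module): the
    # fast tokenizer above on a sentence pair. The checkpoint id is a real hub
    # repository; the texts are arbitrary examples.
    tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
    encoded = tokenizer("Hello world!", "How are you?")
    print(encoded["input_ids"])
    print(encoded["token_type_ids"])  # 0s for the first segment, 1s for the second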
| 205
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" REALM tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        r"""
        Encode a batch of text or text pairs. Every candidate is padded to
        `max_length`, so the output can be stacked into fixed-size tensors.
        """
        # always pad the candidates to max length
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH

        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
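

if __name__ == "__main__":
    # Editor's addition (illustrative): `batch_encode_candidates` pads every
    # candidate to `max_length`, so the result stacks into tensors of shape
    # (batch_size, num_candidates, max_length). The texts are arbitrary examples.
    tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
    candidates = [["Jackie is a cat.", "Jackie lives in Paris."]]
    encoded = tokenizer.batch_encode_candidates(candidates, max_length=10, return_tensors="pt")
    print(encoded["input_ids"].shape)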
| 205
| 1
|
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
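

if __name__ == "__main__":
    # Editor's addition (illustrative): running the tool end to end. The input
    # text is an arbitrary example; calling the tool loads the default
    # checkpoint from the hub on first use.
    summarizer = TextSummarizationTool()
    long_text = (
        "Hugging Face is a company that develops tools for building applications "
        "using machine learning. It is most notable for its transformers library."
    )
    print(summarizer(long_text))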
| 367
|
'''simple docstring'''
import re
def dna(dna: str) -> str:
    """
    Returns the complement strand of the given DNA sequence.
    >>> dna("GCTA")
    'CGAT'
    """
    if len(re.findall('[ATCG]', dna)) != len(dna):
        raise ValueError('Invalid Strand')

    return dna.translate(dna.maketrans('ATCG', 'TAGC'))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 128
| 0
|
"""simple docstring"""
from __future__ import annotations
graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
| 708
|
"""simple docstring"""
from math import sqrt
def solution(limit: int = 1000000) -> int:
    """
    Project Euler 86: return the least value of the maximum cuboid side length
    such that the number of cuboids with an integer shortest surface path
    first exceeds `limit`.
    """
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int

    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )

    return max_cuboid_size
if __name__ == "__main__":
print(F"""{solution() = }""")
| 74
| 0
|
import os
def solution(filename: str = "input.txt") -> int:
    """Return the minimal path sum in the matrix, moving only right, up or down."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(',')]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        # first assume every cell in column j is entered from the left ...
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        # ... then relax by allowing downward moves within the column ...
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        # ... and finally upward moves within the column
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
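# Worked example (quoted from memory of the Project Euler 82 statement, as a
# sanity check only): in the 5x5 example matrix the minimal such path sum is
# 994, following 201 -> 96 -> 342 -> 234 -> 103 -> 18.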
if __name__ == "__main__":
print(f"""{solution() = }""")
| 192
|
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True
    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base').to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='pt').pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 192
| 1
|
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples
    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )
@require_tf
@unittest.skip("Visual question answering not implemented in TF" )
    def test_small_model_tf(self):
        pass
| 117
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
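# Note on the pattern above (a sketch of the intended behaviour, not extra
# functionality): replacing the module object with `_LazyModule` means that
# e.g. `from transformers.models.distilbert import TFDistilBertModel` only
# triggers the TensorFlow-backed import on first attribute access, keeping a
# plain `import transformers` cheap when optional backends are absent.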
| 117
| 1
|
from __future__ import annotations
def mean(nums: list) -> float:
    if not nums:
        raise ValueError('List is empty')
    return sum(nums) / len(nums)
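# Usage sketch: mean([3, 6, 9]) returns 6.0, while mean([]) raises ValueError.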
if __name__ == "__main__":
import doctest
doctest.testmod()
| 345
|
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "test",
            "question",
            "this",
            "is",
            "the",
            "first",
            "second",
            "third",
            "fourth",
            "fifth",
            "record",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)
    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset

    def get_dummy_block_records(self):
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever
    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )
    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)
    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
| 138
| 0
|
'''simple docstring'''
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(F"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 35
|
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
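# CLI sketch via fire (hypothetical file names, assuming this script is saved
# as rouge_cli.py):
#     python rouge_cli.py predictions.txt references.txt --save_path metrics.json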
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 35
| 1
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
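# Invocation sketch (hypothetical script and argument names):
#     python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...
# Everything after the training script path is forwarded to it, plus
# --tpu_num_cores so the script knows how many processes xmp.spawn started.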
if __name__ == "__main__":
main()
| 612
|
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # For testing, an iterable dataset whose length is unknown in advance.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def test_batch_sampler_shards_not_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
    def test_batch_sampler_shards_with_splits_not_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shard should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
| 612
| 1
|
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1_024,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    """Greedy longest-match-first WordPiece tokenization over a fixed vocab."""

    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
    """Construct a CPM-Ant tokenizer (jieba pre-segmentation + greedy WordPiece)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Decode ids into a string."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
| 714
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 83
| 0
|
def pancake_sort(arr):
    """Sort a list in place-order by repeatedly flipping prefixes (pancake sort)."""
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
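# Usage sketch: pancake_sort([3, 1, 2]) first flips the prefix ending at the
# current maximum to bring it to the front, then flips the whole active prefix
# to sink it into place, yielding [1, 2, 3].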
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(pancake_sort(unsorted))
| 348
|
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximate sin(theta) using the first ``accuracy`` Maclaurin terms."""
    if not isinstance(theta, (int, float)):
        raise ValueError('maclaurin_sin() requires either an int or float for theta')

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError('maclaurin_sin() requires a positive int for accuracy')

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Approximate cos(theta) using the first ``accuracy`` Maclaurin terms."""
    if not isinstance(theta, (int, float)):
        raise ValueError('maclaurin_cos() requires either an int or float for theta')

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError('maclaurin_cos() requires a positive int for accuracy')

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
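# Quick numerical check (a sketch): both functions reduce theta modulo 2*pi
# before summing, so maclaurin_sin(pi / 2) and maclaurin_cos(0) both evaluate
# to values within floating-point error of 1.0.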
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
| 348
| 1
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False},
        )
        return config
    def create_and_check_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2))
    def prepare_config_and_inputs_for_common( self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    test_mismatched_shapes = False
    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False
    def setUp( self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=37)
    def test_config( self):
        self.config_tester.run_common_tests()
    def test_model( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip('''Does not support attention outputs''')
def snake_case_ ( self):
pass
@unittest.skip
def snake_case_ ( self):
pass
@unittest.skip('''Esm does not support embedding resizing''')
def snake_case_ ( self):
pass
@unittest.skip('''Esm does not support embedding resizing''')
def snake_case_ ( self):
pass
@unittest.skip('''ESMFold does not support passing input embeds!''')
def snake_case_ ( self):
pass
@unittest.skip('''ESMFold does not support head pruning.''')
def snake_case_ ( self):
pass
@unittest.skip('''ESMFold does not support head pruning.''')
def snake_case_ ( self):
pass
@unittest.skip('''ESMFold does not support head pruning.''')
def snake_case_ ( self):
pass
@unittest.skip('''ESMFold does not support head pruning.''')
def snake_case_ ( self):
pass
@unittest.skip('''ESMFold does not support head pruning.''')
def snake_case_ ( self):
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''')
def snake_case_ ( self):
pass
@unittest.skip('''ESMfold does not output hidden states in the normal way.''')
def snake_case_ ( self):
pass
@unittest.skip('''ESMFold only has one output format.''')
def snake_case_ ( self):
pass
@unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''')
def snake_case_ ( self):
pass
@unittest.skip('''ESMFold does not support input chunking.''')
def snake_case_ ( self):
pass
@unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''')
def snake_case_ ( self):
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''')
def snake_case_ ( self):
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''')
def snake_case_ ( self):
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''')
def snake_case_ ( self):
pass
@unittest.skip('''ESMFold doesn\'t support data parallel.''')
def snake_case_ ( self):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
def snake_case_ ( self):
pass
@require_torch
class EsmModelIntegrationTest( TestCasePlus ):
@slow
    def test_inference_protein_folding( self):
        model = EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''').float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)['''positions''']
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334] , dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , expected_slice , atol=1e-4))
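# Hypothetical invocation (an added note; the test file path is assumed from the usual
# transformers test layout and is not given in this file):
#   RUN_SLOW=1 pytest tests/models/esm/test_modeling_esmfold.py -k EsmFold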
| 526
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig( PretrainedConfig ):
    model_type = '''bert'''
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig( OnnxConfig ):
    @property
    def inputs( self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
])
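# Minimal usage sketch (an added note, not from the original file):
#   config = BertConfig(hidden_size=256, num_hidden_layers=4)
#   config.save_pretrained("./tiny-bert")  # writes config.json for later from_pretrained()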
| 526
| 1
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCamelCase__ ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''torch''', '''transformers''', '''onnx''']
def __init__( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> List[Any]:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def snake_case__ ( cls , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Dict:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def snake_case__ ( cls , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> str:
requires_backends(cls , ["torch", "transformers", "onnx"] )
class UpperCamelCase__ ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''torch''', '''transformers''', '''onnx''']
def __init__( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> List[str]:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def snake_case__ ( cls , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> List[str]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def snake_case__ ( cls , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
class UpperCamelCase__ ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''torch''', '''transformers''', '''onnx''']
def __init__( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> int:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def snake_case__ ( cls , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def snake_case__ ( cls , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> List[str]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
class UpperCamelCase__ ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''torch''', '''transformers''', '''onnx''']
def __init__( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Tuple:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def snake_case__ ( cls , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def snake_case__ ( cls , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
class UpperCamelCase__ ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''torch''', '''transformers''', '''onnx''']
def __init__( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Tuple:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def snake_case__ ( cls , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> str:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def snake_case__ ( cls , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> List[str]:
requires_backends(cls , ["torch", "transformers", "onnx"] )
class UpperCamelCase__ ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''torch''', '''transformers''', '''onnx''']
def __init__( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> int:
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def snake_case__ ( cls , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> int:
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def snake_case__ ( cls , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> int:
requires_backends(cls , ["torch", "transformers", "onnx"] )
| 104
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mega'] = [
        'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MegaForCausalLM',
        'MegaForMaskedLM',
        'MegaForMultipleChoice',
        'MegaForQuestionAnswering',
        'MegaForSequenceClassification',
        'MegaForTokenClassification',
        'MegaModel',
        'MegaPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
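    # Added note: with the lazy module installed in sys.modules, importing e.g. MegaModel
    # from this package only materializes modeling_mega on first attribute access.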
| 135
| 0
|
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    mode = 'sequence-classification'
    def __init__( self , hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams , num_labels , self.mode)
    def forward( self , **inputs):
        return self.model(**inputs)
    def training_step( self , batch , batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
        outputs = self(**inputs)
        loss = outputs[0]
        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
    def prepare_data( self):
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()
        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info('Loading features from cached file %s' , cached_features_file)
            else:
                logger.info('Creating features from dataset file at %s' , args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
                logger.info('Saving features into cached file %s' , cached_features_file)
                torch.save(features , cached_features_file)
    def get_dataloader( self , mode , batch_size , shuffle = False):
        mode = "dev" if mode == "test" else mode
        cached_features_file = self._feature_file(mode)
        logger.info('Loading features from cached file %s' , cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features] , dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features] , dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features] , dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features] , dtype=torch.float)
        return DataLoader(
            TensorDataset(all_input_ids , all_attention_mask , all_token_type_ids , all_labels) , batch_size=batch_size , shuffle=shuffle , )
    def validation_step( self , batch , batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end( self , outputs):
        val_loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x['pred'] for x in outputs] , axis=0)
        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds , axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)
        out_label_ids = np.concatenate([x['target'] for x in outputs] , axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , preds , out_label_ids)}
        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end( self , outputs):
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    def test_epoch_end( self , outputs):
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args( parser , root_dir):
        BaseTransformer.add_model_specific_args(parser , root_dir)
        parser.add_argument(
            '--max_seq_length' , default=128 , type=int , help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
        parser.add_argument(
            '--task' , default='' , type=str , required=True , help='The GLUE task to run' , )
        parser.add_argument(
            '--gpus' , default=0 , type=int , help='The number of GPUs allocated for this, it is by default 0 meaning none' , )
        parser.add_argument(
            '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets')
        return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser , os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser , os.getcwd())
    args = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            './results' , f'{args.task}_{time.strftime("%Y%m%d_%H%M%S")}' , )
        os.makedirs(args.output_dir)
    model = GLUETransformer(args)
    trainer = generic_train(model , args)
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir , 'checkpoint-epoch=*.ckpt') , recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
| 715
|
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
    'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
    'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
    'tokenizer_config_file': {
        'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/blenderbot-3B': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord('!') , ord('~') + 1)) + list(range(ord('¡') , ord('¬') + 1)) + list(range(ord('®') , ord('ÿ') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs , cs))
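# Added note: bytes_to_unicode() maps every byte 0-255 to a printable unicode character so
# BPE merges can operate on text; printable ASCII maps to itself,
# e.g. bytes_to_unicode()[ord("A")] == "A".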
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
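# e.g. get_pairs("low") == {("l", "o"), ("o", "w")} -- the set of adjacent symbol pairs.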
class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False) if isinstance(bos_token , str) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False) if isinstance(eos_token , str) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False) if isinstance(sep_token , str) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False) if isinstance(cls_token , str) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False) if isinstance(unk_token , str) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False) if isinstance(pad_token , str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False) if isinstance(mask_token , str) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding='utf-8') as merges_handle:
            bpe_merges = merges_handle.read().split('\n')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+')
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size( self):
        return len(self.encoder)
    def get_vocab( self):
        return dict(self.encoder , **self.added_tokens_encoder)
    def bpe( self , token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair: self.bpe_ranks.get(pair , float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first , i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word
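    # Added sketch: with bpe_ranks {("l","o"): 0, ("lo","w"): 1}, self.bpe("low") merges
    # ("l","o") and then ("lo","w"), returning "low" as a single symbol.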
    def _tokenize( self , text):
        bpe_tokens = []
        for token in re.findall(self.pat , text):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8'))  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens
    def _convert_token_to_id( self , token):
        return self.encoder.get(token , self.encoder.get(self.unk_token))
    def _convert_id_to_token( self , index):
        return self.decoder.get(index)
    def convert_tokens_to_string( self , tokens):
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8' , errors=self.errors)
        return text
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file , 'w' , encoding='utf-8') as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False) + '\n')
        index = 0
        with open(merge_file , 'w' , encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs):
        add_prefix_space = kwargs.pop('add_prefix_space' , self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids( self , conversation: "Conversation"):
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(' ' + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)
        full_string = ' '.join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.')
        return input_ids
| 6
| 0
|
'''simple docstring'''
def gnome_sort(lst: list) -> list:
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
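# e.g. gnome_sort([34, 2, 10, -9]) -> [-9, 2, 10, 34] (sorts in place, returned for convenience)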
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(gnome_sort(unsorted))
| 466
|
'''simple docstring'''
def infix_2_postfix(infix: str) -> str:
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7
    # Print table header for output
    print(
        "Symbol".center(8) , "Stack".center(print_width) , "Postfix".center(print_width) , sep=" | " , )
    print("-" * (print_width * 3 + 7))
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8) , ("".join(stack)).ljust(print_width) , ("".join(post_fix)).ljust(print_width) , sep=" | " , )  # Output in tabular format
    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8) , ("".join(stack)).ljust(print_width) , ("".join(post_fix)).ljust(print_width) , sep=" | " , )  # Output in tabular format
    return "".join(post_fix)  # return Postfix as str
def infix_2_prefix(infix: str) -> str:
    infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("
    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
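# Worked example (added note): for "a+b" the equation is reversed to "b+a", converted to
# postfix "ba+", and reversed again, so infix_2_prefix("a+b") returns "+ab" (the conversion
# table is printed as a side effect).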
if __name__ == "__main__":
    Infix = input('\nEnter an Infix Equation = ')  # Input an Infix equation
    Infix = ''.join(Infix.split())  # Remove spaces from the input
print('\n\t', Infix, '(Infix) -> ', infix_2_prefix(Infix), '(Prefix)')
| 466
| 1
|
"""simple docstring"""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir):
    '''simple docstring'''
    results = {}
    path = os.path.join(output_dir , "all_results.json")
    if os.path.exists(path):
        with open(path , "r") as f:
            results = json.load(f)
    else:
        raise ValueError(F"can't find {path}")
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests( TestCasePlus ):
    def test_run_glue( self):
        '''simple docstring'''
        import xla_spawn
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()
        with patch.object(sys , "argv" , testargs):
            start = time()
            xla_spawn.main()
            end = time()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"] , 0.75)
            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start , 500)
    def test_trainer_tpu( self):
        '''simple docstring'''
        import xla_spawn
        testargs = "\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split()
        with patch.object(sys , "argv" , testargs):
            xla_spawn.main()
| 721
|
"""simple docstring"""
def join(separator: str , separated: list[str]) -> str:
    '''simple docstring'''
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase , str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
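# e.g. join("-", ["a", "b", "c"]) -> "a-b-c"; the trailing separator is stripped before returning.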
if __name__ == "__main__":
from doctest import testmod
testmod()
| 251
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    '''simple docstring'''
    config = DPTConfig(embedding_type='''hybrid''')
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = '''project'''
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = '''huggingface/label-files'''
        filename = '''ade20k-id2label.json'''
        id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='''dataset''')) , '''r'''))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_(state_dict):
    '''simple docstring'''
    ignore_keys = ['''pretrained.model.head.weight''', '''pretrained.model.head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None)
def rename_key(name):
    '''simple docstring'''
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('''pretrained.model''' , '''dpt.encoder''')
    if "pretrained.model" in name:
        name = name.replace('''pretrained.model''' , '''dpt.embeddings''')
    if "patch_embed" in name:
        name = name.replace('''patch_embed''' , '''''')
    if "pos_embed" in name:
        name = name.replace('''pos_embed''' , '''position_embeddings''')
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''')
    if "proj" in name and "project" not in name:
        name = name.replace('''proj''' , '''projection''')
    if "blocks" in name:
        name = name.replace('''blocks''' , '''layer''')
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''')
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''')
    if "norm1" in name and "backbone" not in name:
        name = name.replace('''norm1''' , '''layernorm_before''')
    if "norm2" in name and "backbone" not in name:
        name = name.replace('''norm2''' , '''layernorm_after''')
    if "scratch.output_conv" in name:
        name = name.replace('''scratch.output_conv''' , '''head''')
    if "scratch" in name:
        name = name.replace('''scratch''' , '''neck''')
    if "layer1_rn" in name:
        name = name.replace('''layer1_rn''' , '''convs.0''')
    if "layer2_rn" in name:
        name = name.replace('''layer2_rn''' , '''convs.1''')
    if "layer3_rn" in name:
        name = name.replace('''layer3_rn''' , '''convs.2''')
    if "layer4_rn" in name:
        name = name.replace('''layer4_rn''' , '''convs.3''')
    if "refinenet" in name:
        layer_idx = int(name[len('''neck.refinenet''') : len('''neck.refinenet''') + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"""refinenet{layer_idx}""" , f"""fusion_stage.layers.{abs(layer_idx-4 )}""")
    if "out_conv" in name:
        name = name.replace('''out_conv''' , '''projection''')
    if "resConfUnit1" in name:
        name = name.replace('''resConfUnit1''' , '''residual_layer1''')
    if "resConfUnit2" in name:
        name = name.replace('''resConfUnit2''' , '''residual_layer2''')
    if "conv1" in name:
        name = name.replace('''conv1''' , '''convolution1''')
    if "conv2" in name:
        name = name.replace('''conv2''' , '''convolution2''')
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace('''pretrained.act_postprocess1.0.project.0''' , '''neck.reassemble_stage.readout_projects.0.0''')
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace('''pretrained.act_postprocess2.0.project.0''' , '''neck.reassemble_stage.readout_projects.1.0''')
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace('''pretrained.act_postprocess3.0.project.0''' , '''neck.reassemble_stage.readout_projects.2.0''')
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace('''pretrained.act_postprocess4.0.project.0''' , '''neck.reassemble_stage.readout_projects.3.0''')
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace('''pretrained.act_postprocess1.3''' , '''neck.reassemble_stage.layers.0.projection''')
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace('''pretrained.act_postprocess1.4''' , '''neck.reassemble_stage.layers.0.resize''')
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace('''pretrained.act_postprocess2.3''' , '''neck.reassemble_stage.layers.1.projection''')
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace('''pretrained.act_postprocess2.4''' , '''neck.reassemble_stage.layers.1.resize''')
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace('''pretrained.act_postprocess3.3''' , '''neck.reassemble_stage.layers.2.projection''')
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace('''pretrained.act_postprocess4.3''' , '''neck.reassemble_stage.layers.3.projection''')
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace('''pretrained.act_postprocess4.4''' , '''neck.reassemble_stage.layers.3.resize''')
    if "pretrained" in name:
        name = name.replace('''pretrained''' , '''dpt''')
    if "bn" in name:
        name = name.replace('''bn''' , '''batch_norm''')
    if "head" in name:
        name = name.replace('''head''' , '''head.head''')
    if "encoder.norm" in name:
        name = name.replace('''encoder.norm''' , '''layernorm''')
    if "auxlayer" in name:
        name = name.replace('''auxlayer''' , '''auxiliary_head.head''')
    if "backbone" in name:
        name = name.replace('''backbone''' , '''backbone.bit.encoder''')
    if ".." in name:
        name = name.replace('''..''' , '''.''')
    if "stem.conv" in name:
        name = name.replace('''stem.conv''' , '''bit.embedder.convolution''')
    if "blocks" in name:
        name = name.replace('''blocks''' , '''layers''')
    if "convolution" in name and "backbone" in name:
        name = name.replace('''convolution''' , '''conv''')
    if "layer" in name and "backbone" in name:
        name = name.replace('''layer''' , '''layers''')
    if "backbone.bit.encoder.bit" in name:
        name = name.replace('''backbone.bit.encoder.bit''' , '''backbone.bit''')
    if "embedder.conv" in name:
        name = name.replace('''embedder.conv''' , '''embedder.convolution''')
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace('''backbone.bit.encoder.stem.norm''' , '''backbone.bit.embedder.norm''')
    return name
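# Worked example (added note): rename_key("pretrained.model.blocks.0.attn.proj.weight")
# walks the chain above and yields "dpt.encoder.layer.0.attention.output.dense.weight".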
def read_in_q_k_v(state_dict , config):
    '''simple docstring'''
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.weight""")
        in_proj_bias = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    '''simple docstring'''
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name , show_prediction):
    '''simple docstring'''
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url , map_location='''cpu''')
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if '''ade''' in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if '''ade''' in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image , return_tensors='''pt''')
    # forward pass
    outputs = model(**encoding).logits if '''ade''' in checkpoint_url else model(**encoding).predicted_depth
    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1) , size=(image.size[1], image.size[0]) , mode='''bicubic''' , align_corners=False , )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"""Saving model to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"""Saving image processor to {pytorch_dump_folder_path}""")
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model.push_to_hub('''ybelkada/dpt-hybrid-midas''')
        image_processor.push_to_hub('''ybelkada/dpt-hybrid-midas''')
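# Hypothetical invocation (added note; the script file name is assumed, not given here, and
# --checkpoint_url must point at a locally loadable .pt file since torch.load is used above):
#   python convert_dpt_hybrid_to_pytorch.py --checkpoint_url ./dpt_large-midas.pt \
#       --pytorch_dump_folder_path ./dpt-hybrid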
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 484
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_mobilebert': [
        'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'MobileBertConfig',
        'MobileBertOnnxConfig',
    ],
    'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mobilebert_fast'] = ['MobileBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mobilebert'] = [
        'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MobileBertForMaskedLM',
        'MobileBertForMultipleChoice',
        'MobileBertForNextSentencePrediction',
        'MobileBertForPreTraining',
        'MobileBertForQuestionAnswering',
        'MobileBertForSequenceClassification',
        'MobileBertForTokenClassification',
        'MobileBertLayer',
        'MobileBertModel',
        'MobileBertPreTrainedModel',
        'load_tf_weights_in_mobilebert',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mobilebert'] = [
        'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFMobileBertForMaskedLM',
        'TFMobileBertForMultipleChoice',
        'TFMobileBertForNextSentencePrediction',
        'TFMobileBertForPreTraining',
        'TFMobileBertForQuestionAnswering',
        'TFMobileBertForSequenceClassification',
        'TFMobileBertForTokenClassification',
        'TFMobileBertMainLayer',
        'TFMobileBertModel',
        'TFMobileBertPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 484
| 1
|
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = '''
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415
},
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
'''
_KWARGS_DESCRIPTION = '''
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=["About 95 species are currently accepted ."]
>>> predictions=["About 95 you now get in ."]
>>> references=[["About 95 species are currently known ."]]
>>> wiki_split = datasets.load_metric("wiki_split")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}
'''
def normalize_answer(s):
    # Lower text and remove punctuation, articles and extra whitespace.
    def remove_articles(text):
        regex = re.compile(R'''\b(a|an|the)\b''' , re.UNICODE)
        return re.sub(regex , ''' ''' , text)
    def white_space_fix(text):
        return " ".join(text.split())
    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)
    def lower(text):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s))))
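# e.g. normalize_answer("The  Cat!") -> "cat" (lowercased; punctuation and articles removed).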
def compute_exact(a_gold ,a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_em(predictions ,references):
    scores = [any(compute_exact(ref ,pred) for ref in refs) for pred, refs in zip(predictions ,references)]
    return (sum(scores) / len(scores)) * 100
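# e.g. compute_em(["a cat"], [["A cat!", "a dog"]]) == 100.0 -- a prediction counts as exact
# if it matches any of its references after normalization.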
def SARIngram(sgrams ,cgrams ,rgramslist ,numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)
    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref
    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref
    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter
    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(rgramcounter) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)
    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent ,csent ,rsents):
    numref = len(rsents)
    s1grams = ssent.split(''' ''')
    c1grams = csent.split(''' ''')
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(''' ''')
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0 ,len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + ''' ''' + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + ''' ''' + r1grams[i + 1] + ''' ''' + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + ''' ''' + r1grams[i + 1] + ''' ''' + r1grams[i + 2] + ''' ''' + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)
    for i in range(0 ,len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + ''' ''' + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + ''' ''' + s1grams[i + 1] + ''' ''' + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + ''' ''' + s1grams[i + 1] + ''' ''' + s1grams[i + 2] + ''' ''' + s1grams[i + 3]
            s4grams.append(s4gram)
    for i in range(0 ,len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + ''' ''' + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + ''' ''' + c1grams[i + 1] + ''' ''' + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + ''' ''' + c1grams[i + 1] + ''' ''' + c1grams[i + 2] + ''' ''' + c1grams[i + 3]
            c4grams.append(c4gram)
    (keep1score, del1score, add1score) = SARIngram(s1grams ,c1grams ,r1gramslist ,numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams ,c2grams ,r2gramslist ,numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams ,c3grams ,r3gramslist ,numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams ,c4grams ,r4gramslist ,numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence: str, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
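
# --- Usage sketch (added for illustration; not part of the upstream metric file). ---
# Assuming this module is exposed through `datasets.load_metric("wiki_split")`,
# scoring might look like the following; the sentences are made-up examples.
#
#     import datasets
#
#     wiki_split = datasets.load_metric("wiki_split")
#     sources = ["About 95 species are currently accepted ."]
#     predictions = ["About 95 species are currently accepted ."]
#     references = [["About 95 species are currently accepted ."]]
#     results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
#     # -> {'sari': ..., 'sacrebleu': ..., 'exact': ...}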
| 709
|
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
| 191
| 0
|
"""Finding strongly connected components of a directed graph with Kosaraju's algorithm."""

from __future__ import annotations

test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth first search from `vert`, returning vertices in post-order."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Collect every vertex reachable from `vert` in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: return the list of strongly connected components of `graph`."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
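
# --- Usage sketch (added for illustration). Running the module directly on the two
# test graphs above prints their strongly connected components; the exact ordering
# inside each component depends on the DFS traversal order.
if __name__ == "__main__":
    print(strongly_connected_components(test_graph_1))  # e.g. [[0, 1, 2], [3], [4]]
    print(strongly_connected_components(test_graph_2))  # e.g. [[0, 2, 1], [3, 5, 4]]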
| 664
|
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})


class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
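
# --- Usage sketch (added for illustration; the path and model name below are
# placeholders). Building a cached training dataset for a BERT-style QA model
# might look like:
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     data_args = SquadDataTrainingArguments(model_type="bert", data_dir="/path/to/squad")
#     train_dataset = SquadDataset(data_args, tokenizer=tokenizer, mode=Split.train)
#     batch = train_dataset[0]  # dict with input_ids, attention_mask, start_positions, ...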
| 561
| 0
|
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    """
    Language generation pipeline using any `ModelWithLMHead`.
    """

    # Prefix text to help Transformer-XL and XLNet with short prompts, as proposed by Aman Rusia
    # in https://github.com/rusiaaman/XLNet-gen#methodology
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})
        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)

    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {"generated_text": all_text}
            records.append(record)

        return records
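
# --- Usage sketch (added for illustration). This class is normally reached via the
# public `pipeline` factory under the "text-generation" task; "gpt2" is an example model.
#
#     from transformers import pipeline
#
#     generator = pipeline("text-generation", model="gpt2")
#     outputs = generator("Hello, I'm a language model,", max_new_tokens=20)
#     # -> [{'generated_text': "Hello, I'm a language model, ..."}]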
| 529
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 529
| 1
|
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "artists_file": "artists.json",
    "lyrics_file": "lyrics.json",
    "genres_file": "genres.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "artists_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
    },
    "genres_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
    },
    "lyrics_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
    },
}

PRETRAINED_LYRIC_TOKENS_SIZES = {
    "jukebox": 512,
}
class JukeboxTokenizer(PreTrainedTokenizer):
    """
    Constructs a Jukebox tokenizer. Jukebox can be conditioned on 3 different inputs: artists, genres and lyrics.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_lyric_input_size = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        artists_file,
        genres_file,
        lyrics_file,
        version=["v3", "v2", "v2"],
        max_n_lyric_tokens=512,
        n_genres=5,
        unk_token="<|endoftext|>",
        **kwargs,
    ):
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            unk_token=unk_token,
            n_genres=n_genres,
            version=version,
            max_n_lyric_tokens=max_n_lyric_tokens,
            **kwargs,
        )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres

        with open(artists_file, encoding="utf-8") as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)

        with open(genres_file, encoding="utf-8") as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)

        with open(lyrics_file, encoding="utf-8") as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)

        oov = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 79:
            oov = oov.replace(r"\-'", r"\-+'")

        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}

    @property
    def vocab_size(self):
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)

    def get_vocab(self):
        # NOTE: the original line was garbled (`dict()` with three positional mappings is
        # invalid); merging the three encoders preserves the apparent intent.
        return {**self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder}

    def _convert_token_to_id(self, list_artists, list_genres, list_lyrics):
        """Converts the artist, genre and lyrics tokens to their index using the vocabulary."""
        artists_id = [self.artists_encoder.get(artist, 0) for artist in list_artists]
        for genres in range(len(list_genres)):
            list_genres[genres] = [self.genres_encoder.get(genre, 0) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))

        lyric_ids = [[self.lyrics_encoder.get(character, 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids

    def _tokenize(self, lyrics):
        """Converts a string into a sequence of tokens (characters)."""
        return list(lyrics)

    def tokenize(self, artist, genre, lyrics, **kwargs):
        """Converts three strings into three sequences of tokens."""
        artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics)
        lyrics = self._tokenize(lyrics)
        return artist, genre, lyrics

    def prepare_for_tokenization(self, artists, genres, lyrics, is_split_into_words=False):
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx]) + ".v2"
                genres[idx] = [
                    self._normalize(genre) + ".v2" for genre in genres[idx].split("_")
                ]  # split is for the full dictionary with combined genres

        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+")
            vocab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
            self.vocab["<unk>"] = 0
            self.n_vocab = len(vocab) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ""
        else:
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+")

        lyrics = self._run_strip_accents(lyrics)
        lyrics = lyrics.replace("\\", "\n")
        lyrics = self.out_of_vocab.sub("", lyrics), [], []
        return artists, genres, lyrics
    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _normalize(self, text: str) -> str:
        """Normalizes the input text, used for the genres and the artists."""
        accepted = (
            [chr(i) for i in range(ord("a"), ord("z") + 1)]
            + [chr(i) for i in range(ord("A"), ord("Z") + 1)]
            + [chr(i) for i in range(ord("0"), ord("9") + 1)]
            + ["."]
        )
        accepted = frozenset(accepted)
        pattern = re.compile(r"_+")
        text = "".join([c if c in accepted else "_" for c in text.lower()])
        text = pattern.sub("_", text).strip("_")
        return text

    def convert_lyric_tokens_to_string(self, lyrics: List[str]) -> str:
        return " ".join(lyrics)

    def convert_to_tensors(self, inputs, tensor_type: Optional[Union[str, TensorType]] = None, prepend_batch_axis: bool = False):
        """Convert the inner content to tensors."""
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)

        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed."
                )
            import tensorflow as tf

            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
            import torch

            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
            import jax.numpy as jnp  # noqa: F811

            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy

        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]
            if not is_tensor(inputs):
                inputs = as_tensor(inputs)
        except:  # noqa E722
            raise ValueError(
                "Unable to create tensor, you should probably activate truncation and/or padding "
                "with 'padding=True' 'truncation=True' to have batched tensors with the same length."
            )
        return inputs
    def __call__(self, artist, genres, lyrics="", return_tensors="pt") -> BatchEncoding:
        """Convert the raw string to a list of token ids."""
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version)
        genres = [genres] * len(self.version)

        artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics)
        artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens)

        attention_masks = [-INFINITY] * len(full_tokens[-1])
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors
            )
            for i in range(len(self.version))
        ]
        return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks})

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Saves the tokenizer's vocabulary dictionaries to the provided save_directory."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return

        artists_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"]
        )
        with open(artists_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.artists_encoder, ensure_ascii=False))

        genres_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"]
        )
        with open(genres_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.genres_encoder, ensure_ascii=False))

        lyrics_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"]
        )
        with open(lyrics_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))

        return (artists_file, genres_file, lyrics_file)

    def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
        """Converts indices (integers) into tokens (str) using the vocab."""
        artist = self.artists_decoder.get(artists_index)
        genres = [self.genres_decoder.get(genre) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
        return artist, genres, lyrics
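
# --- Usage sketch (added for illustration; inputs are made-up). The tokenizer is
# normally loaded from the hub and called with an artist, a genre string (genres
# joined by "_") and the lyrics:
#
#     from transformers import JukeboxTokenizer
#
#     tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
#     encoding = tokenizer("Alan Jackson", "Country Rock", "old town road")
#     encoding["input_ids"]  # one tensor per prior listed in `self.version`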
| 326
|
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
a__ : Optional[int] = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 188
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
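
# --- Usage sketch (added for illustration; `audio` is a placeholder waveform). ---
# Like other agent tools, the instance is callable and chains encode -> forward -> decode:
#
#     tool = SpeechToTextTool()
#     text = tool(audio)  # `audio` being a raw waveform, e.g. a 16 kHz float array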
| 712
|
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
    """
    A bunch of args sanity checks to perform even starting...
    """
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0


def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")

    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )

    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )

    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")

    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )

    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")

    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )

    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )

    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")

    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")

    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)
    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")


if __name__ == "__main__":
    main()
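
# --- Example invocation (added for illustration; all paths below are placeholders,
# and the hyper-parameters are only one reasonable combination that passes
# `sanity_checks`). A minimal DistilBERT-style MLM distillation run could look like:
#
#   python train.py \
#     --student_type distilbert --student_config training_configs/distilbert-base-uncased.json \
#     --teacher_type bert --teacher_name bert-base-uncased \
#     --mlm --alpha_mlm 0.5 --alpha_clm 0.0 \
#     --dump_path serialization_dir/my_first_distillation \
#     --data_file data/binarized_text.bert-base-uncased.pickle \
#     --token_counts data/token_counts.bert-base-uncased.pickle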
| 522
| 0
|
"""simple docstring"""
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
| 532
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        # Run the pipeline twice with the same seed: once returning the output
        # dataclass, once a plain tuple, and check both paths agree.
        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images
        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)
        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
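

# A minimal usage sketch of the pipeline exercised by the slow test above,
# outside the test harness. The checkpoint and step count mirror that test and
# are illustrative assumptions, not requirements.
if __name__ == "__main__":
    demo_unet = UNet2DModel.from_pretrained("google/ncsnpp-church-256")
    demo_scheduler = ScoreSdeVeScheduler.from_pretrained("google/ncsnpp-church-256")
    demo_pipe = ScoreSdeVePipeline(unet=demo_unet, scheduler=demo_scheduler).to(torch_device)
    demo_images = demo_pipe(num_inference_steps=10, output_type="numpy", generator=torch.manual_seed(0)).images
    print(demo_images.shape)  # (1, 256, 256, 3)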
| 532
| 1
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    """Configuration for an encoder-decoder model: wraps one encoder config and
    one decoder config under a single composite config."""

    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        """Instantiate a composite config from an encoder config and a decoder config."""
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
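

# A standalone usage sketch, illustrative only: composing a composite config
# from two existing configs via the public `transformers` API backed by this
# module (the model names below are placeholder assumptions):
#
#     from transformers import AutoConfig, EncoderDecoderConfig
#
#     encoder_cfg = AutoConfig.from_pretrained("bert-base-uncased")
#     decoder_cfg = AutoConfig.from_pretrained("bert-base-uncased")
#     config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_cfg, decoder_cfg)
#     assert config.decoder.is_decoder and config.decoder.add_cross_attention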
| 720
|
"""simple docstring"""
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return the word's letters in sorted order -- anagrams share a signature."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every word from the word list that is an anagram of `my_word`."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open('anagrams.txt', 'w') as file:
        file.write('all_anagrams = \n ')
        file.write(pprint.pformat(all_anagrams))
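

# A second, self-contained demonstration of the same signature trick on an
# inline word list, so the idea can be checked without a words.txt file
# (the sample words below are an illustrative assumption).
if __name__ == "__main__":
    sample_words = ['listen', 'silent', 'enlist', 'google', 'banana']
    sample_index = collections.defaultdict(list)
    for sample_word in sample_words:
        sample_index[signature(sample_word)].append(sample_word)
    print({w: sample_index[signature(w)] for w in sample_words if len(sample_index[signature(w)]) > 1})
    # -> {'listen': ['listen', 'silent', 'enlist'], 'silent': [...], 'enlist': [...]}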
| 632
| 0
|