| code (string, lengths 82 to 53.2k) | code_codestyle (int64, 0 to 721) | style_context (string, lengths 91 to 41.9k) | style_context_codestyle (int64, 0 to 699) | label (int64, 0 to 1) |
|---|---|---|---|---|
import argparse
import json
import os

import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile

from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
    rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int


def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Post-renaming of basic JAX keys to PyTorch conventions."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor


def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content


def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)


def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)

    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
    parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )


def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."

    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
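# --- Illustrative note (not part of the original script) ---
# A minimal sketch of what `shard_on_the_fly` leaves on disk, assuming the default
# `weights_name` of `pytorch_model.bin` and more than one shard; the file names and
# index layout follow directly from the code above (the shard count 00072 is only
# an example value):
#
#   pytorch_model-00001-of-00072.bin
#   pytorch_model-00002-of-00072.bin
#   ...
#   pytorch_model.bin.index.json   # WEIGHTS_INDEX_NAME
#
# with the index JSON shaped as:
#
#   {
#     "metadata": {"total_size": <sum of all weight sizes in bytes>},
#     "weight_map": {"<parameter name>": "pytorch_model-00001-of-00072.bin", ...}
#   }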
| code_codestyle: 108 |
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| style_context_codestyle: 108 | label: 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( _a , unittest.TestCase ):
_a = AudioLDMPipeline
_a = TEXT_TO_AUDIO_PARAMS
_a = TEXT_TO_AUDIO_BATCH_PARAMS
_a = frozenset(
[
'num_inference_steps',
'num_waveforms_per_prompt',
'generator',
'latents',
'output_type',
'return_dict',
'callback',
'callback_steps',
] )
def __lowercase ( self : Optional[int] ):
torch.manual_seed(0 )
lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=(32, 64) , class_embed_type="""simple_projection""" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=lowerCAmelCase , )
lowerCAmelCase = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
torch.manual_seed(0 )
lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
lowerCAmelCase = ClapTextModelWithProjection(lowerCAmelCase )
lowerCAmelCase = RobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-roberta""" , model_max_length=77 )
lowerCAmelCase = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_6000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=lowerCAmelCase , )
lowerCAmelCase = SpeechTaHifiGan(lowerCAmelCase )
lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""vocoder""": vocoder,
}
return components
def __lowercase ( self : Optional[int] , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any]=0 ):
if str(lowerCAmelCase ).startswith("""mps""" ):
lowerCAmelCase = torch.manual_seed(lowerCAmelCase )
else:
lowerCAmelCase = torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
lowerCAmelCase = {
"""prompt""": """A hammer hitting a wooden surface""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
}
return inputs
def __lowercase ( self : Optional[int] ):
lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = AudioLDMPipeline(**lowerCAmelCase )
lowerCAmelCase = audioldm_pipe.to(lowerCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCAmelCase = self.get_dummy_inputs(lowerCAmelCase )
lowerCAmelCase = audioldm_pipe(**lowerCAmelCase )
lowerCAmelCase = output.audios[0]
assert audio.ndim == 1
assert len(lowerCAmelCase ) == 256
lowerCAmelCase = audio[:10]
lowerCAmelCase = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def __lowercase ( self : Tuple ):
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = AudioLDMPipeline(**lowerCAmelCase )
lowerCAmelCase = audioldm_pipe.to(lowerCAmelCase )
lowerCAmelCase = audioldm_pipe.to(lowerCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCAmelCase = self.get_dummy_inputs(lowerCAmelCase )
lowerCAmelCase = 3 * [inputs["""prompt"""]]
# forward
lowerCAmelCase = audioldm_pipe(**lowerCAmelCase )
lowerCAmelCase = output.audios[0]
lowerCAmelCase = self.get_dummy_inputs(lowerCAmelCase )
lowerCAmelCase = 3 * [inputs.pop("""prompt""" )]
lowerCAmelCase = audioldm_pipe.tokenizer(
lowerCAmelCase , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=lowerCAmelCase , return_tensors="""pt""" , )
lowerCAmelCase = text_inputs["""input_ids"""].to(lowerCAmelCase )
lowerCAmelCase = audioldm_pipe.text_encoder(
lowerCAmelCase , )
lowerCAmelCase = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
lowerCAmelCase = F.normalize(lowerCAmelCase , dim=-1 )
lowerCAmelCase = prompt_embeds
# forward
lowerCAmelCase = audioldm_pipe(**lowerCAmelCase )
lowerCAmelCase = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def __lowercase ( self : List[Any] ):
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = AudioLDMPipeline(**lowerCAmelCase )
lowerCAmelCase = audioldm_pipe.to(lowerCAmelCase )
lowerCAmelCase = audioldm_pipe.to(lowerCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCAmelCase = self.get_dummy_inputs(lowerCAmelCase )
lowerCAmelCase = 3 * ["""this is a negative prompt"""]
lowerCAmelCase = negative_prompt
lowerCAmelCase = 3 * [inputs["""prompt"""]]
# forward
lowerCAmelCase = audioldm_pipe(**lowerCAmelCase )
lowerCAmelCase = output.audios[0]
lowerCAmelCase = self.get_dummy_inputs(lowerCAmelCase )
lowerCAmelCase = 3 * [inputs.pop("""prompt""" )]
lowerCAmelCase = []
for p in [prompt, negative_prompt]:
lowerCAmelCase = audioldm_pipe.tokenizer(
lowerCAmelCase , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=lowerCAmelCase , return_tensors="""pt""" , )
lowerCAmelCase = text_inputs["""input_ids"""].to(lowerCAmelCase )
lowerCAmelCase = audioldm_pipe.text_encoder(
lowerCAmelCase , )
lowerCAmelCase = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
lowerCAmelCase = F.normalize(lowerCAmelCase , dim=-1 )
embeds.append(lowerCAmelCase )
lowerCAmelCase , lowerCAmelCase = embeds
# forward
lowerCAmelCase = audioldm_pipe(**lowerCAmelCase )
lowerCAmelCase = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def __lowercase ( self : str ):
lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = PNDMScheduler(skip_prk_steps=lowerCAmelCase )
lowerCAmelCase = AudioLDMPipeline(**lowerCAmelCase )
lowerCAmelCase = audioldm_pipe.to(lowerCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCAmelCase = self.get_dummy_inputs(lowerCAmelCase )
lowerCAmelCase = """egg cracking"""
lowerCAmelCase = audioldm_pipe(**lowerCAmelCase , negative_prompt=lowerCAmelCase )
lowerCAmelCase = output.audios[0]
assert audio.ndim == 1
assert len(lowerCAmelCase ) == 256
lowerCAmelCase = audio[:10]
lowerCAmelCase = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def __lowercase ( self : int ):
lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = PNDMScheduler(skip_prk_steps=lowerCAmelCase )
lowerCAmelCase = AudioLDMPipeline(**lowerCAmelCase )
lowerCAmelCase = audioldm_pipe.to(lowerCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCAmelCase = """A hammer hitting a wooden surface"""
# test num_waveforms_per_prompt=1 (default)
lowerCAmelCase = audioldm_pipe(lowerCAmelCase , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
lowerCAmelCase = 2
lowerCAmelCase = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
lowerCAmelCase = 2
lowerCAmelCase = audioldm_pipe(lowerCAmelCase , num_inference_steps=2 , num_waveforms_per_prompt=lowerCAmelCase ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
lowerCAmelCase = 2
lowerCAmelCase = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=lowerCAmelCase ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def __lowercase ( self : Optional[Any] ):
lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = AudioLDMPipeline(**lowerCAmelCase )
lowerCAmelCase = audioldm_pipe.to(lowerCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCAmelCase = audioldm_pipe.vocoder.config.sampling_rate
lowerCAmelCase = self.get_dummy_inputs(lowerCAmelCase )
lowerCAmelCase = audioldm_pipe(audio_length_in_s=0.016 , **lowerCAmelCase )
lowerCAmelCase = output.audios[0]
assert audio.ndim == 1
assert len(lowerCAmelCase ) / vocoder_sampling_rate == 0.016
lowerCAmelCase = audioldm_pipe(audio_length_in_s=0.032 , **lowerCAmelCase )
lowerCAmelCase = output.audios[0]
assert audio.ndim == 1
assert len(lowerCAmelCase ) / vocoder_sampling_rate == 0.032
def __lowercase ( self : Any ):
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = AudioLDMPipeline(**lowerCAmelCase )
lowerCAmelCase = audioldm_pipe.to(lowerCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCAmelCase = ["""hey"""]
lowerCAmelCase = audioldm_pipe(lowerCAmelCase , num_inference_steps=1 )
lowerCAmelCase = output.audios.shape
assert audio_shape == (1, 256)
lowerCAmelCase = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
lowerCAmelCase = SpeechTaHifiGan(lowerCAmelCase ).to(lowerCAmelCase )
lowerCAmelCase = audioldm_pipe(lowerCAmelCase , num_inference_steps=1 )
lowerCAmelCase = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def __lowercase ( self : List[str] ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCAmelCase )
def __lowercase ( self : Union[str, Any] ):
self._test_inference_batch_single_identical(test_mean_pixel_difference=lowerCAmelCase )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __lowercase ( self : Any ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCAmelCase )
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __lowercase ( self : List[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self : Tuple , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any]="cpu" , lowerCAmelCase : List[str]=torch.floataa , lowerCAmelCase : List[str]=0 ):
lowerCAmelCase = torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
lowerCAmelCase = np.random.RandomState(lowerCAmelCase ).standard_normal((1, 8, 128, 16) )
lowerCAmelCase = torch.from_numpy(lowerCAmelCase ).to(device=lowerCAmelCase , dtype=lowerCAmelCase )
lowerCAmelCase = {
"""prompt""": """A hammer hitting a wooden surface""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 2.5,
}
return inputs
def __lowercase ( self : Optional[int] ):
lowerCAmelCase = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
lowerCAmelCase = audioldm_pipe.to(lowerCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCAmelCase = self.get_inputs(lowerCAmelCase )
lowerCAmelCase = 25
lowerCAmelCase = audioldm_pipe(**lowerCAmelCase ).audios[0]
assert audio.ndim == 1
assert len(lowerCAmelCase ) == 8_1920
lowerCAmelCase = audio[7_7230:7_7240]
lowerCAmelCase = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
lowerCAmelCase = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1e-2
def __lowercase ( self : Union[str, Any] ):
lowerCAmelCase = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
lowerCAmelCase = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
lowerCAmelCase = audioldm_pipe.to(lowerCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCAmelCase = self.get_inputs(lowerCAmelCase )
lowerCAmelCase = audioldm_pipe(**lowerCAmelCase ).audios[0]
assert audio.ndim == 1
assert len(lowerCAmelCase ) == 8_1920
lowerCAmelCase = audio[2_7780:2_7790]
lowerCAmelCase = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
lowerCAmelCase = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3e-2
| code_codestyle: 529 |
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
a = 'src/transformers'
a = 'docs/source/en'
a = '.'
def lowercase (snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Tuple ) -> Any:
'''simple docstring'''
with open(snake_case__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowerCAmelCase = f.readlines()
# Find the start prompt.
lowerCAmelCase = 0
while not lines[start_index].startswith(snake_case__ ):
start_index += 1
start_index += 1
lowerCAmelCase = start_index
while not lines[end_index].startswith(snake_case__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
a = 'Model|Encoder|Decoder|ForConditionalGeneration'
# Regexes that match TF/Flax/PT model names.
a = re.compile(R'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
a = re.compile(R'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
a = re.compile(R'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# This is to make sure the transformers module imported is the one in the repo.
a = direct_transformers_import(TRANSFORMERS_PATH)
def lowercase (snake_case__ : int ) -> Any:
'''simple docstring'''
lowerCAmelCase = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , snake_case__ )
return [m.group(0 ) for m in matches]
def lowercase (snake_case__ : str , snake_case__ : Tuple ) -> int:
'''simple docstring'''
lowerCAmelCase = 2 if text == """✅""" or text == """❌""" else len(snake_case__ )
lowerCAmelCase = (width - text_length) // 2
lowerCAmelCase = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def lowercase () -> str:
'''simple docstring'''
lowerCAmelCase = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
lowerCAmelCase = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
lowerCAmelCase = {name: config.replace("""Config""" , """""" ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
lowerCAmelCase = collections.defaultdict(snake_case__ )
lowerCAmelCase = collections.defaultdict(snake_case__ )
lowerCAmelCase = collections.defaultdict(snake_case__ )
lowerCAmelCase = collections.defaultdict(snake_case__ )
lowerCAmelCase = collections.defaultdict(snake_case__ )
# Let's lookup through all transformers object (once).
for attr_name in dir(snake_case__ ):
lowerCAmelCase = None
if attr_name.endswith("""Tokenizer""" ):
lowerCAmelCase = slow_tokenizers
lowerCAmelCase = attr_name[:-9]
elif attr_name.endswith("""TokenizerFast""" ):
lowerCAmelCase = fast_tokenizers
lowerCAmelCase = attr_name[:-13]
elif _re_tf_models.match(snake_case__ ) is not None:
lowerCAmelCase = tf_models
lowerCAmelCase = _re_tf_models.match(snake_case__ ).groups()[0]
elif _re_flax_models.match(snake_case__ ) is not None:
lowerCAmelCase = flax_models
lowerCAmelCase = _re_flax_models.match(snake_case__ ).groups()[0]
elif _re_pt_models.match(snake_case__ ) is not None:
lowerCAmelCase = pt_models
lowerCAmelCase = _re_pt_models.match(snake_case__ ).groups()[0]
if lookup_dict is not None:
while len(snake_case__ ) > 0:
if attr_name in model_name_to_prefix.values():
lowerCAmelCase = True
break
# Try again after removing the last word in the name
lowerCAmelCase = """""".join(camel_case_split(snake_case__ )[:-1] )
# Let's build that table!
lowerCAmelCase = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
lowerCAmelCase = ["""Model""", """Tokenizer slow""", """Tokenizer fast""", """PyTorch support""", """TensorFlow support""", """Flax Support"""]
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
lowerCAmelCase = [len(snake_case__ ) + 2 for c in columns]
lowerCAmelCase = max([len(snake_case__ ) for name in model_names] ) + 2
# Build the table per se
lowerCAmelCase = """|""" + """|""".join([_center_text(snake_case__ , snake_case__ ) for c, w in zip(snake_case__ , snake_case__ )] ) + """|\n"""
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([""":""" + """-""" * (w - 2) + """:""" for w in widths] ) + "|\n"
lowerCAmelCase = {True: """✅""", False: """❌"""}
for name in model_names:
lowerCAmelCase = model_name_to_prefix[name]
lowerCAmelCase = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(snake_case__ , snake_case__ ) for l, w in zip(snake_case__ , snake_case__ )] ) + "|\n"
return table
def lowercase (snake_case__ : Dict=False ) -> Any:
'''simple docstring'''
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = _find_text_in_file(
filename=os.path.join(snake_case__ , """index.md""" ) , start_prompt="""<!--This table is updated automatically from the auto modules""" , end_prompt="""<!-- End table-->""" , )
lowerCAmelCase = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(snake_case__ , """index.md""" ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
"""The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.""" )
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
a = parser.parse_args()
check_model_table(args.fix_and_overwrite)
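# --- Illustrative note (not part of the original script) ---
# A sketch of the markdown emitted by `get_model_table_from_auto_modules`, assuming a
# library state where BERT has every backend; the exact rows depend on the installed
# `transformers` checkout:
#
#   |    Model     | Tokenizer slow | Tokenizer fast | PyTorch support | TensorFlow support | Flax Support |
#   |:------------:|:--------------:|:--------------:|:---------------:|:------------------:|:------------:|
#   |     BERT     |       ✅        |       ✅        |        ✅        |         ✅          |      ✅       |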
| style_context_codestyle: 529 | label: 1 |
import re
import string

import numpy as np

import datasets


_DESCRIPTION = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: List of predicted texts.
    references: List of reference texts.
    regexes_to_ignore: List, defaults to None. Regex expressions of characters to
        ignore when calculating the exact matches. Note: these regexes are removed
        from the input data before the changes based on the options below (e.g. ignore_case,
        ignore_punctuation, ignore_numbers) are applied.
    ignore_case: Boolean, defaults to False. If true, turns everything
        to lowercase so that capitalization differences are ignored.
    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
        comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    25.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    50.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    75.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
    >>> print(round(results["exact_match"], 1))
    100.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It's like comparing oranges and apples."]
    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It's like comparing apples and oranges."]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    33.3
"""

_CITATION = """
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
| code_codestyle: 47 |
import warnings

from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor


logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| style_context_codestyle: 212 | label: 0 |
"""simple docstring"""
import math
import sys
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->str:
_lowerCamelCase : Optional[int] = ''''''
try:
with open(SCREAMING_SNAKE_CASE_ , '''rb''' ) as binary_file:
_lowerCamelCase : Any = binary_file.read()
for dat in data:
_lowerCamelCase : List[str] = F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('''File not accessible''' )
sys.exit()
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->str:
_lowerCamelCase : Any = {'''0''': '''0''', '''1''': '''1'''}
_lowerCamelCase, _lowerCamelCase : str = '''''', ''''''
_lowerCamelCase : Dict = len(SCREAMING_SNAKE_CASE_ )
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
_lowerCamelCase : Union[str, Any] = lexicon[curr_string]
result += last_match_id
_lowerCamelCase : Any = last_match_id + '''0'''
if math.loga(SCREAMING_SNAKE_CASE_ ).is_integer():
_lowerCamelCase : Tuple = {}
for curr_key in list(SCREAMING_SNAKE_CASE_ ):
_lowerCamelCase : List[str] = lexicon.pop(SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : str = new_lex
_lowerCamelCase : Dict = last_match_id + '''1'''
index += 1
_lowerCamelCase : List[str] = ''''''
return result
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->None:
_lowerCamelCase : Optional[Any] = 8
try:
with open(SCREAMING_SNAKE_CASE_ , '''wb''' ) as opened_file:
_lowerCamelCase : str = [
to_write[i : i + byte_length]
for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('''10000000''' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(SCREAMING_SNAKE_CASE_ , 2 ).to_bytes(1 , byteorder='''big''' ) )
except OSError:
print('''File not accessible''' )
sys.exit()
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->str:
_lowerCamelCase : str = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
_lowerCamelCase : Optional[int] = data_bits[counter:]
_lowerCamelCase : int = data_bits[counter + 1 :]
return data_bits
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->None:
_lowerCamelCase : int = read_file_binary(SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : int = remove_prefix(SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : Optional[Any] = decompress_data(SCREAMING_SNAKE_CASE_ )
write_file_binary(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
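# --- Illustrative note (not part of the original script) ---
# A hand-traced mini example of `decompress_data`, assuming the seed lexicon
# {"0": "0", "1": "1"} defined above. Feeding the bit string "01":
#   - "0" matches the lexicon, so "0" is emitted and the lexicon grows;
#   - the trailing "1" no longer matches any (now longer) key and is left pending.
# So decompress_data("01") == "0". Real inputs arrive framed: remove_prefix first
# strips the size prefix the matching compressor writes at the front of the file.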
| code_codestyle: 558 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| style_context_codestyle: 558 | label: 1 |
from typing import List, Optional, Union

import numpy as np
import PIL.Image

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    PILImageResampling,
    get_image_size,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", List["PIL.Image.Image"], np.ndarray, List[np.ndarray]],
        do_resize: Optional[bool] = None,
        do_rescale: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
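# --- Illustrative usage sketch (not part of the original module) ---
# Assuming the class restored above (a GLPN-style processor), preprocessing rounds
# both spatial dimensions down to the nearest multiple of `size_divisor` and
# rescales pixel values by 1/255:
#
#   processor = GLPNImageProcessor(size_divisor=32)
#   image = np.random.randint(0, 256, (3, 65, 97), dtype=np.uint8)
#   batch = processor.preprocess(image, return_tensors="np")
#   batch["pixel_values"][0].shape  # -> (3, 64, 96)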
| code_codestyle: 456 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| style_context_codestyle: 5 | label: 0 |
import unittest

import numpy as np
import requests

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

if is_vision_available():
    from PIL import Image

    from transformers import Pix2StructImageProcessor


class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()

        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048

        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_vqa(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"

            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )

    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil_four_channels(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (the alpha channel is dropped on RGB conversion)
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
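# --- Illustrative note (not part of the original file) ---
# The `expected_hidden_dim` used throughout these tests is just patch area times
# channel count, plus two positional (row/column) coordinates. With the default
# tester settings above:
#
#   16 * 16 * 3 + 2 == 770   # patch height * patch width * num_channels + 2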
| code_codestyle: 711 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = tempfile.mkdtemp()
# fmt: off
UpperCamelCase = ["""""", """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
UpperCamelCase = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
UpperCamelCase = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
UpperCamelCase = {"""unk_token""": """<unk>"""}
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__magic_name__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__magic_name__ ) )
UpperCamelCase = {
"""do_resize""": True,
"""size""": 2_0,
"""do_center_crop""": True,
"""crop_size""": 1_8,
"""do_normalize""": True,
"""image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"""image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
UpperCamelCase = os.path.join(self.tmpdirname , __magic_name__ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(__magic_name__ , __magic_name__ )
def lowerCamelCase_ ( self : str , **__magic_name__ : Tuple ):
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="""!""" , **__magic_name__ )
def lowerCamelCase_ ( self : Optional[int] , **__magic_name__ : str ):
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="""!""" , **__magic_name__ )
def lowerCamelCase_ ( self : Optional[int] , **__magic_name__ : Optional[Any] ):
"""simple docstring"""
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__magic_name__ )
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
UpperCamelCase = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_rust_tokenizer()
UpperCamelCase = self.get_image_processor()
UpperCamelCase = OwlViTProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
processor_slow.save_pretrained(self.tmpdirname )
UpperCamelCase = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__magic_name__ )
UpperCamelCase = OwlViTProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
processor_fast.save_pretrained(self.tmpdirname )
UpperCamelCase = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __magic_name__ )
self.assertIsInstance(processor_fast.tokenizer , __magic_name__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __magic_name__ )
self.assertIsInstance(processor_fast.image_processor , __magic_name__ )
    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])
    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 181
| 0
|
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
__a = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
| 30
|
"""simple docstring"""
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])
    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"])

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
"""Hello""",
"""(""",
"""bracket""",
""")""",
"""and""",
"""side""",
"""@-@""",
"""scrolled""",
"""[""",
"""and""",
"""]""",
"""Henry""",
"""'s""",
"""$""",
"""5""",
"""@,@""",
"""000""",
"""with""",
"""3""",
"""@.@""",
"""34""",
"""m""",
""".""",
"""What""",
"""'s""",
"""up""",
"""!""",
"""?""",
]
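        # TransfoXL's Moses-style tokenization protects punctuation inside numbers and
        # hyphenated words with placeholder tokens (@-@, @,@, @.@) so the original text
        # can be restored exactly by convert_tokens_to_string, as checked below.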
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)
    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
| 359
| 0
|
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
| 359
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)
    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True)
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, torchscript=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, fp16=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], fp16=True, multi_process=False)
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False)
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 359
| 1
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to time out, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)

        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16)
        pipe.to("cuda")

        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt,
            token_indices=token_indices,
            guidance_scale=7.5,
            generator=generator,
            num_inference_steps=5,
            max_iter_to_alter=5,
            output_type="numpy",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy")
        assert np.abs((expected_image - image).max()) < 5e-1
| 138
|
'''simple docstring'''
from __future__ import annotations
END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    """Return all words in the trie that start with the given prefix."""
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 138
| 1
|
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors in parallel: 1 / (1/R1 + 1/R2 + ... + 1/Rn)."""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum
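# Example: resistor_parallel([2, 2]) == 1.0, since 1 / (1/2 + 1/2) = 1.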
def resistor_series(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors in series: R1 + R2 + ... + Rn."""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
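# Example: resistor_series([2, 3]) == 5.0, since series resistances simply add.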
if __name__ == "__main__":
import doctest
doctest.testmod()
| 708
|
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved
def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False
    return False
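# The solver is a depth-first backtracking search: mark the current cell in `solutions`,
# recurse into all four neighbours, and reset the cell to 0 if none of them reaches the exit.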
if __name__ == "__main__":
import doctest
doctest.testmod()
| 225
| 0
|
'''simple docstring'''
def sylvester(number: int) -> int:
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
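# Sylvester's sequence: a(1) = 2 and a(n) = a(n-1)^2 - a(n-1) + 1, giving 2, 3, 7, 43, 1807, ...
# Each term is one more than the product of all previous terms.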
if __name__ == "__main__":
print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 466
|
"""simple docstring"""
def jaro_winkler(str1: str, str2: str) -> float:
    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
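# Winkler's modification boosts the plain Jaro similarity by 0.1 per matching leading
# character (up to 4), so strings sharing a common prefix score higher.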
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('''hello''', '''world'''))
| 661
| 0
|
"""simple docstring"""
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__(
        self,
        notes_encoder: SpectrogramNotesEncoder,
        continuous_encoder: SpectrogramContEncoder,
        decoder: T5FilmDecoder,
        scheduler: DDPMScheduler,
        melgan: OnnxRuntimeModel if is_onnx_available() else Any,
    ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )

    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale features to network outputs range."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert scale_features: map network outputs back to the features range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask)

        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask)

        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps)
        return logits
    @torch.no_grad()
    def __call__(
        self,
        input_tokens: List[List[int]],
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 100,
        return_dict: bool = True,
        output_type: str = "numpy",
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}.")

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype)
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True)

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info("Generated segment", i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'.")
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'.")

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
| 709
|
"""simple docstring"""
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Slide a size x size window over a square matrix with the given stride, keeping the max of each window."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Slide a size x size window over a square matrix with the given stride, keeping the average of each window."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
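# Example: pooling a 4x4 matrix with size=2 and stride=2 yields a 2x2 result,
# per the output-size formula (4 - 2) // 2 + 1 = 2.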
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name="avgpooling", verbose=True)
# Loading the image
    image = Image.open("path_to_image")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 255
| 0
|
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }
        return DPTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices, num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size, hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False, initializer_range=self.initializer_range, is_hybrid=self.is_hybrid,
            backbone_config=backbone_config, backbone_featmap_shape=self.backbone_featmap_shape,
        )
def UpperCAmelCase__ ( self : List[str] , _A : Optional[int] , _A : Union[str, Any] , _A : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = DPTModel(config=A__ )
model.to(A__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[Any] = model(A__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Optional[Any] , _A : int , _A : List[Any] , _A : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = self.num_labels
__SCREAMING_SNAKE_CASE : Tuple = DPTForDepthEstimation(A__ )
model.to(A__ )
model.eval()
__SCREAMING_SNAKE_CASE : Union[str, Any] = model(A__ )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def UpperCAmelCase__ ( self : List[Any] , _A : str , _A : Union[str, Any] , _A : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = self.num_labels
__SCREAMING_SNAKE_CASE : Optional[Any] = DPTForSemanticSegmentation(A__ )
model.to(A__ )
model.eval()
__SCREAMING_SNAKE_CASE : Tuple = model(A__ , labels=A__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs
__SCREAMING_SNAKE_CASE : List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = DPTModelTester(self )
__SCREAMING_SNAKE_CASE : Union[str, Any] = ConfigTester(self , config_class=A__ , has_text_modality=A__ , hidden_size=37 )
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
pass
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : str = model_class(A__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__SCREAMING_SNAKE_CASE : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A__ , nn.Linear ) )
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : int = model_class(A__ )
__SCREAMING_SNAKE_CASE : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE : Optional[Any] = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A__ )
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__ )
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*A__ )
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A__ )
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE : Any = True
if model_class in get_values(A__ ):
continue
__SCREAMING_SNAKE_CASE : Optional[int] = model_class(A__ )
model.to(A__ )
model.train()
__SCREAMING_SNAKE_CASE : int = self._prepare_for_class(A__ , A__ , return_labels=A__ )
__SCREAMING_SNAKE_CASE : Optional[int] = model(**A__ ).loss
loss.backward()
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE : Optional[Any] = False
__SCREAMING_SNAKE_CASE : Optional[int] = True
if model_class in get_values(A__ ) or not model_class.supports_gradient_checkpointing:
continue
__SCREAMING_SNAKE_CASE : Tuple = model_class(A__ )
model.to(A__ )
model.gradient_checkpointing_enable()
model.train()
__SCREAMING_SNAKE_CASE : List[Any] = self._prepare_for_class(A__ , A__ , return_labels=A__ )
__SCREAMING_SNAKE_CASE : List[str] = model(**A__ ).loss
loss.backward()
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE : Tuple = _config_zero_init(A__ )
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : int = model_class(config=A__ )
# Skip the check for the backbone
__SCREAMING_SNAKE_CASE : str = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__SCREAMING_SNAKE_CASE : List[str] = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
pass
@slow
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__SCREAMING_SNAKE_CASE : int = DPTModel.from_pretrained(A__ )
self.assertIsNotNone(A__ )
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE : Any = '''add'''
with self.assertRaises(A__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = DPTForDepthEstimation(A__ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
| 74
|
'''simple docstring'''
from ....utils import logging
logger = logging.get_logger(__name__)


class MMBTConfig:
    """Configuration class to store the configuration of an MMBT model, wrapping an underlying text config."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048) -> None:
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 591
| 0
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"])
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
UpperCAmelCase_ = Accelerator()
UpperCAmelCase_ = (accelerator.state.process_index + 2, 1_0)
UpperCAmelCase_ = torch.randint(0, 1_0, shape).to(accelerator.device)
UpperCAmelCase_ = ''''''
UpperCAmelCase_ = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
UpperCAmelCase_ = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
UpperCAmelCase_ = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 264
|
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """Return the squared Euclidean norm of a vector, i.e. its dot product with itself."""
    return np.dot(vector, vector)
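# Example: norm_squared(np.array([3, 4])) == 25, i.e. 3*3 + 4*4.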
class SVC:
    """Support vector classifier with a linear or RBF kernel, trained via Wolfe's dual problem."""

    def __init__(self, *, regularization: float = np.inf, kernel: str = "linear", gamma: float = 0.0) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))
def lowerCAmelCase ( self : str , _lowercase : list[ndarray] , _lowercase : ndarray ):
"""simple docstring"""
_UpperCamelCase: List[str] = observations
_UpperCamelCase: Optional[int] = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((_UpperCamelCase) , ): Any = np.shape(_lowercase )
def to_minimize(_lowercase : ndarray ) -> float:
_UpperCamelCase: Optional[int] = 0
((_UpperCamelCase) , ): str = np.shape(_lowercase )
for i in range(_lowercase ):
for j in range(_lowercase ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j] )
)
return 1 / 2 * s - sum(_lowercase )
_UpperCamelCase: Optional[Any] = LinearConstraint(_lowercase , 0 , 0 )
_UpperCamelCase: Optional[int] = Bounds(0 , self.regularization )
_UpperCamelCase: Dict = minimize(
_lowercase , np.ones(_lowercase ) , bounds=_lowercase , constraints=[ly_contraint] ).x
_UpperCamelCase: Union[str, Any] = l_star
# calculating mean offset of separation plane to points
_UpperCamelCase: List[str] = 0
for i in range(_lowercase ):
for j in range(_lowercase ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j] )
_UpperCamelCase: str = s / n
def lowerCAmelCase ( self : Optional[Any] , _lowercase : ndarray ):
"""simple docstring"""
_UpperCamelCase: Optional[int] = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] , _lowercase )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
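# A minimal usage sketch (illustrative data, not from the original module):
# two linearly separable points with labels +1/-1, then classify new points.
#
#   svc = SVC(kernel="linear")
#   svc.fit([np.array([1.0, 1.0]), np.array([-1.0, -1.0])], np.array([1, -1]))
#   svc.predict(np.array([2.0, 2.0]))    # expected: 1
#   svc.predict(np.array([-2.0, -2.0]))  # expected: -1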
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
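# A minimal usage sketch (the model name is a real hub checkpoint; the question
# and passages are illustrative): encode one question against two passages, then
# decode best spans from a reader model's DPRReaderOutput.
#
#   tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(
#       questions="What is love?",
#       titles=["Haddaway", "Song"],
#       texts=["'What Is Love' is a song...", "A song is a musical composition..."],
#       return_tensors="pt",
#   )
#   # outputs = model(**encoded)  # a DPRReader model yields start/end/relevance logits
#   # spans = tokenizer.decode_best_spans(encoded, outputs)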
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
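# A minimal sketch (illustrative, not part of the original file) of how the mixin
# is meant to be combined with unittest.TestCase in a concrete test class:
#
#   import unittest
#   from transformers import Wav2Vec2FeatureExtractor
#
#   class Wav2Vec2FeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
#       feature_extraction_class = Wav2Vec2FeatureExtractor
#       feat_extract_dict = {"feature_size": 1, "sampling_rate": 16000, "padding_value": 0.0}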
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        # Build streaming dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class JsonDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args) -> bytes:
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)
    def _write(
        self,
        file_obj: BinaryIO,
        orient,
        lines,
        index,
        **to_json_kwargs,
    ) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_bytes = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_bytes)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_bytes in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_bytes)

        return written
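# A minimal usage sketch (illustrative paths, not part of the original module):
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"a": [1, 2, 3]})
#   JsonDatasetWriter(ds, "data.jsonl", lines=True).write()   # JSON lines on disk
#   ds2 = JsonDatasetReader("data.jsonl").read()              # round-trip back to a Dataset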
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
__lowerCAmelCase : Optional[int] = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source: float, target: float) -> bool:
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    dataset_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(dataset_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
                        ),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }
                ),
                splits=[
                    {
                        "name": "train",
                        "num_bytes": 2351563,
                        "num_examples": 10000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 238418,
                        "num_examples": 1000,
                    },
                ],
                download_size=3940680,
                dataset_size=2589981,
            )
        }
    )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
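# A minimal sketch (hypothetical path; the exact flag spelling varies across
# `datasets` versions) of the CLI entry point this command backs:
#
#   datasets-cli test path/to/dataset_loading_script --all_configs --save_infos
#
# which regenerates the dataset card metadata the test above checks against.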
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer

MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True


@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)


@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples


def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list


@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
st.title("Long Form Question Answering with ELI5")

# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)

# Long Form QA with ELI5 and Wikipedia
description = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)

action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True

retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s

if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            sas_model,
            sas_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))

disclaimer = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 1_28,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 50,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 10,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 10,
'exponential_decay_length_penalty': (5, 1.0_1),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")
    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )
    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )
    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)
    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
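# A minimal sketch (hypothetical values) of the `update_from_string` behavior the
# tests above rely on: typed parsing of a comma-separated "key=value" string.
#
#   config = GPT2Config()
#   config.update_from_string("n_embd=10,resid_pdrop=0.2,scale_attn_weights=false")
#   assert config.n_embd == 10 and config.scale_attn_weights is False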
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
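# A minimal sketch of what the shim does in practice (illustrative only):
# instantiating the old class still works but emits a FutureWarning.
#
#   import warnings
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter("always")
#       _ = ImageGPTFeatureExtractor()
#       assert any(issubclass(w.category, FutureWarning) for w in caught)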
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_deberta""": ["""DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DebertaConfig""", """DebertaOnnxConfig"""],
"""tokenization_deberta""": ["""DebertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
"""DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DebertaForMaskedLM""",
"""DebertaForQuestionAnswering""",
"""DebertaForSequenceClassification""",
"""DebertaForTokenClassification""",
"""DebertaModel""",
"""DebertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
"""TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDebertaForMaskedLM""",
"""TFDebertaForQuestionAnswering""",
"""TFDebertaForSequenceClassification""",
"""TFDebertaForTokenClassification""",
"""TFDebertaModel""",
"""TFDebertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
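# A minimal sketch (not part of the module) of what the _LazyModule indirection buys:
# submodules are only imported on first attribute access, so the heavy torch-dependent
# import below happens when DebertaModel is first touched, not at package import time.
#
#   from transformers.models import deberta
#   model_cls = deberta.DebertaModel  # modeling_deberta is imported here, not earlier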
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column
    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Dict:
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(SCREAMING_SNAKE_CASE_, torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(SCREAMING_SNAKE_CASE_, '__array__' ) and not isinstance(SCREAMING_SNAKE_CASE_, jax.Array ):
UpperCamelCase : Any = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(SCREAMING_SNAKE_CASE_, np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(SCREAMING_SNAKE_CASE_ ) for substruct in data_struct] )
elif isinstance(SCREAMING_SNAKE_CASE_, (list, tuple) ):
return self._consolidate([self.recursive_tensorize(SCREAMING_SNAKE_CASE_ ) for substruct in data_struct] )
return self._tensorize(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Any:
return map_nested(self._recursive_tensorize, SCREAMING_SNAKE_CASE_, map_list=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Mapping:
UpperCamelCase : List[Any] = self.numpy_arrow_extractor().extract_row(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = self.python_features_decoder.decode_row(SCREAMING_SNAKE_CASE_ )
return self.recursive_tensorize(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> "jax.Array":
UpperCamelCase : str = self.numpy_arrow_extractor().extract_column(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = self.python_features_decoder.decode_column(SCREAMING_SNAKE_CASE_, pa_table.column_names[0] )
UpperCamelCase : Tuple = self.recursive_tensorize(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = self._consolidate(SCREAMING_SNAKE_CASE_ )
return column
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Mapping:
UpperCamelCase : List[Any] = self.numpy_arrow_extractor().extract_batch(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = self.python_features_decoder.decode_batch(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = self.recursive_tensorize(SCREAMING_SNAKE_CASE_ )
for column_name in batch:
UpperCamelCase : List[Any] = self._consolidate(batch[column_name] )
return batch
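# Usage sketch (an assumption, not part of this file): in the `datasets` library this
# formatter backs `Dataset.with_format("jax")`, so rows come back as jax arrays.
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("jax")
#     ds[0]["x"]  # jnp array, int32 by default (int64 when jax_enable_x64 is set)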
| 40
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)
class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
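# Hedged usage sketch for the two classes above:
#
#     config = PoolFormerConfig(hidden_sizes=[64, 128, 320, 512], depths=[2, 2, 6, 2])
#     onnx_config = PoolFormerOnnxConfig(config)
#     onnx_config.inputs               # OrderedDict describing the dynamic pixel_values axes
#     onnx_config.atol_for_validation  # 2e-3, the tolerance used when validating the ONNX export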
| 137
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
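# With the lazy structure above, a plain import triggers the real module load only on
# first attribute access, e.g. (assuming this file sits in the usual transformers layout):
#
#     from transformers import TimeSeriesTransformerConfig
#     config = TimeSeriesTransformerConfig(prediction_length=24)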
| 704
|
'''simple docstring'''
def twos_complement(number: int) -> str:
    """Return the two's complement binary representation of a non-positive integer."""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
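# Quick sanity checks for the function above:
#
#     >>> twos_complement(-5)
#     '0b1011'
#     >>> twos_complement(0)
#     '0b0'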
| 680
| 0
|
'''simple docstring'''
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    """Load an OmegaConf config from a YAML file."""
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    """Build a VQModel from a config and load its checkpoint weights onto `device`."""
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    """Encode an image batch to the VQGAN latent space and decode it back."""
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    """Resolve a dotted path like `pkg.module.Class` to the actual object."""
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    """Instantiate the object described by a config with `target` and `params` keys."""
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
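# Minimal end-to-end sketch (paths are placeholders; assumes a taming-transformers
# VQGAN checkpoint and config on disk):
#
#     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#     vqgan = load_vqgan(device, conf_path="vqgan.yaml", ckpt_path="vqgan.ckpt")
#     x = torch.randn(1, 3, 256, 256, device=device)  # image batch scaled to [-1, 1]
#     x_rec = reconstruct_with_vqgan(x, vqgan)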
| 150
|
'''simple docstring'''
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 150
| 1
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )
    def create_and_check_xlm_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_case_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)

    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )

    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
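# To exercise this suite locally (standard HuggingFace test layout assumed):
#
#     python -m pytest tests/models/xlm/test_modeling_xlm.py -k "xlm_model"
#
# The @slow tests above additionally require RUN_SLOW=1 in the environment.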
| 707
|
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """Factory function used to convert a model TF 1.0 checkpoint into a PyTorch checkpoint."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"


class ConvertCommand(BaseTransformersCLICommand):
@staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(self, model_type: str, tf_checkpoint: str, pytorch_dump_output: str, config: str, finetuning_task_name: str, *args):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(A_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
            )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(A_ )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output )
else:
raise ValueError(
"--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" )
| 510
| 0
|
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig:
    """Wraps a DeepSpeed config given as a dict, a json file path, or a base64-encoded string."""

    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"
                )

        self.config = config

        self.set_stage_and_offload()

    def set_stage_and_offload(self):
        # zero stage - this is done as early as possible, before model is created, to allow
        # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
        # during ``zero.Init()`` which needs to know the dtype, and some other hparams.
        self._stage = self.get_value("zero_optimization.stage", -1)

        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True

    def find_config_node(self, ds_key_long):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key

        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(nodes[-1])

    def is_true(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload


class DeepSpeedEngineWrapper:
    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        # runs backpropagation and handles mixed precision
        self.engine.backward(loss, **kwargs)

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.


class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False


class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed


class DummyOptim:
    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
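# Minimal sketch: the config wrapper can be queried without importing DeepSpeed itself.
#
#     ds_config = HfDeepSpeedConfig({"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}})
#     ds_config.is_zero3()    # True
#     ds_config.is_offload()  # True (offload_param.device is "cpu")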
| 105
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 105
| 1
|
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent):
    """Return True if the sink `t` is reachable from the source `s` in the residual graph."""
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    """Run Ford-Fulkerson and return the saturated forward edges forming the minimum cut."""
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
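# For the sample capacity matrix above, the saturated forward edges form the min cut
# (the max flow here is 23, and the cut capacities 12 + 7 + 4 match it):
#
#     >>> mincut(test_graph, source=0, sink=5)
#     [(1, 3), (4, 3), (4, 5)]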
| 242
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Builds the input ids for a conversation, truncated to the model's maximum length."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
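# Usage sketch (downloads the published tokenizer files from the Hub):
#
#     tok = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#     tok("Hello world")["input_ids"]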
| 242
| 1
|
'''simple docstring'''
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
    args = parser.parse_args()

    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
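# Illustrative invocation (script name, paths and identifiers are placeholders):
#
#     python consolidate_rag_checkpoint.py \
#         --model_type rag_sequence \
#         --generator_name_or_path facebook/bart-large-cnn \
#         --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#         --dest ./rag-checkpoint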
| 330
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/nllb-200-distilled-600M''': 10_24,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class NllbTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for NLLB, with fairseq-aligned vocabulary indices."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |   7  |  8   |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting.
        - In legacy mode: no prefix and suffix=[eos, src_lang_code].
        - In default mode: prefix=[src_lang_code], suffix=[eos].
        """
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting.
        - In legacy mode: no prefix and suffix=[eos, tgt_lang_code].
        - In default mode: prefix=[tgt_lang_code], suffix=[eos].
        """
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
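# Translation usage sketch (hedged: pairs with the seq2seq NLLB checkpoints; the target
# language is enforced at generation time through `forced_bos_token_id`):
#
#     tok = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
#     inputs = tok("Hello world", return_tensors="pt")
#     forced_bos = tok.lang_code_to_id["fra_Latn"]  # pass to model.generate(..., forced_bos_token_id=forced_bos)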
| 330
| 1
|
'''simple docstring'''
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def couloumbs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    charge_product = abs(charge1 * charge2)

    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
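# Worked example: solving for the missing force between two charges.
#
#     >>> couloumbs_law(force=0, charge1=3, charge2=5, distance=2000)
#     {'force': 33705.0}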
| 466
|
'''simple docstring'''
from ....utils import logging
logger = logging.get_logger(__name__)


class MMBTConfig:
    """Configuration class to store the configuration of a Multimodal Bitransformer (MMBT) model."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 466
| 1
|
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations))
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
):
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0):
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int):
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0)

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 412
|
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
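# Sanity check of the sequence-length formula above (simple arithmetic):
# (30 // 2) ** 2 = 225 patches, and ceil((1 - 0.6) * (225 + 1)) = 91 tokens
# survive masking (the +1 accounts for the [CLS] token).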
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='ViTMAE does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict['noise'] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
    @unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.')
    def test_determinism(self):
        pass

    @unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.')
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.')
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load')
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass
@slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    '''simple docstring'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained('facebook/vit-mae-base') if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]])
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
| 362
| 0
|
def merge_sort(collection):
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
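# Illustrative trace (not from the original file): merge_sort([5, 2, 9, 1])
# extracts (min, max) pairs (1, 9) and then (2, 5), so start=[1, 2] and
# end=[9, 5] -> reversed to [5, 9], giving the result [1, 2, 5, 9].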
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
| 702
|
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"


def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)


def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
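# Minimal sketch of the pattern exercised by the tests below (assuming the
# Accelerate API used in this file): gradients are only all-reduced for steps
# taken *outside* the `no_sync` context.
#   with accelerator.no_sync(ddp_model):
#       step_model(ddp_model, ddp_input, ddp_target, accelerator)  # local grads only
#   step_model(ddp_model, ddp_input, ddp_target, accelerator)      # grads synced here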
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2)
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2)
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 655
| 0
|
"""simple docstring"""
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection):
    '''simple docstring'''
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stacks = Stack([element])
        i = bisect_left(stacks, new_stacks)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stacks)

    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
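# Illustrative run (not from the original file): patience_sort([1, 9, 5, 21, 17, 6])
# builds the descending piles [1], [9, 5] and [21, 17, 6]; reversing each pile
# and heap-merging them yields [1, 5, 6, 9, 17, 21].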
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(patience_sort(unsorted))
| 553
|
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, 'config.json')) and os.path.isfile(
            os.path.join(dirpath, 'config.json')
        ):
            os.remove(os.path.join(dirpath, 'config.json'))
        if os.path.exists(os.path.join(dirpath, 'pytorch_model.bin')) and os.path.isfile(
            os.path.join(dirpath, 'pytorch_model.bin')
        ):
            os.remove(os.path.join(dirpath, 'pytorch_model.bin'))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
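# Quick check of the helper above (simple arithmetic): a uniform distribution
# over four outcomes has entropy -4 * 0.25 * log(0.25) = log(4) ~= 1.386 nats.
#   entropy(torch.tensor([0.25, 0.25, 0.25, 0.25]))  # -> tensor(1.3863)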
def print_2d_tensor(tensor):
    logger.info('lv, h >\t' + '\t'.join(f'{x + 1}' for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f'layer {row + 1}:\t' + '\t'.join(f'{x:.5f}' for x in tensor[row].cpu().data))
        else:
            logger.info(f'layer {row + 1}:\t' + '\t'.join(f'{x:d}' for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc='Iteration', disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info('Attention entropies')
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info('Head importance scores')
        print_2d_tensor(head_importance)
    logger.info('Head ranked by importance scores')
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device)
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
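# Shapes of the values returned above (descriptive note): `attn_entropy` and
# `head_importance` are (num_hidden_layers, num_attention_heads) tensors, and
# `total_loss` is the language-modeling loss summed over the evaluation batches.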
def mask_heads(args, model, eval_dataloader):
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info('Pruning: original score: %f, threshold: %f', original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float('Inf')
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print('BREAK BY num_to_mask')
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info('Heads to mask: %s', str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask)
        current_score = 1 / loss
        logger.info(
            'Masking: current score: %f, remaining heads %d (%.1f percents)',
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info('Final head mask')
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, 'head_mask.npy'), head_mask.detach().cpu().numpy())
    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask)
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }
    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)',
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info('Pruning: score with masking: %f score with pruning: %f', score_masking, score_pruning)
    logger.info('Pruning: speed ratio (original timing / new timing): %f percents', original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--data_dir', default=None, type=str, required=True,
        help='The input data dir. Should contain the .tsv files (or other data files) for the task.',
    )
    parser.add_argument(
        '--model_name_or_path', default=None, type=str, required=True,
        help='Path to pretrained model or model identifier from huggingface.co/models',
    )
    parser.add_argument(
        '--output_dir', default=None, type=str, required=True,
        help='The output directory where the model predictions and checkpoints will be written.',
    )
    # Other parameters
    parser.add_argument(
        '--config_name', default='', type=str,
        help='Pretrained config name or path if not the same as model_name_or_path',
    )
    parser.add_argument(
        '--tokenizer_name', default='', type=str,
        help='Pretrained tokenizer name or path if not the same as model_name_or_path',
    )
    parser.add_argument(
        '--cache_dir', default=None, type=str,
        help='Where do you want to store the pre-trained models downloaded from s3',
    )
    parser.add_argument(
        '--data_subset', type=int, default=-1, help='If > 0: limit the data to a subset of data_subset instances.')
    parser.add_argument(
        '--overwrite_output_dir', action='store_true', help='Whether to overwrite data in output directory')
    parser.add_argument(
        '--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
    parser.add_argument(
        '--dont_normalize_importance_by_layer', action='store_true', help='Don\'t normalize importance score by layers')
    parser.add_argument(
        '--dont_normalize_global_importance', action='store_true',
        help='Don\'t normalize all importance scores between 0 and 1',
    )
    parser.add_argument(
        '--try_masking', action='store_true', help='Whether to try to mask head until a threshold of accuracy.')
    parser.add_argument(
        '--masking_threshold', default=0.9, type=float,
        help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).',
    )
    parser.add_argument(
        '--masking_amount', default=0.1, type=float, help='Amount to heads to masking at each masking step.')
    parser.add_argument('--metric_name', default='acc', type=str, help='Metric to use for head masking.')
    parser.add_argument(
        '--max_seq_length', default=128, type=int,
        help=(
            'The maximum total input sequence length after WordPiece tokenization. \n'
            'Sequences longer than this will be truncated, sequences shorter padded.'
        ),
    )
    parser.add_argument('--batch_size', default=1, type=int, help='Batch size.')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--local_rank', type=int, default=-1, help='local_rank for distributed training on gpus')
    parser.add_argument('--no_cuda', action='store_true', help='Whether not to use CUDA when available')
    parser.add_argument('--server_ip', type=str, default='', help='Can be used for distant debugging.')
    parser.add_argument('--server_port', type=str, default='', help='Can be used for distant debugging.')
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('Waiting for debugger attach')
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu')
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device('cuda', args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend='nccl')  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, 'run_args.bin'))
    logger.info('Training/evaluation parameters %s', args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ])
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
| 47
| 0
|
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
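# Worked example for the tile counting above (simple arithmetic, not from the
# original file): an outer_width of 3 with a hole_width of 1 uses
# 3 * 3 - 1 * 1 = 8 tiles, so count[8] is incremented by one arrangement.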
if __name__ == "__main__":
print(F"""{solution() = }""")
| 188
|
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))
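# Illustrative merge (derived from the test data above): the two inputs sort to
# -11 -> -1 -> 0 -> 1 -> 3 -> 5 -> 7 -> 9 and -2 -> 0 -> 2 -> 3 -> 4 -> 6 -> 8 -> 10,
# and merge_lists interleaves them into
# -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10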
if __name__ == "__main__":
import doctest
doctest.testmod()
    SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 188
| 1
|
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = 'config.json'
WEIGHTS_NAME = 'diffusion_pytorch_model.bin'
FLAX_WEIGHTS_NAME = 'diffusion_flax_model.msgpack'
ONNX_WEIGHTS_NAME = 'model.onnx'
SAFETENSORS_WEIGHTS_NAME = 'diffusion_pytorch_model.safetensors'
ONNX_EXTERNAL_WEIGHTS_NAME = 'weights.pb'
HUGGINGFACE_CO_RESOLVE_ENDPOINT = 'https://huggingface.co'
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = 'diffusers_modules'
HF_MODULES_CACHE = os.getenv('HF_MODULES_CACHE', os.path.join(hf_cache_home, 'modules'))
DEPRECATED_REVISION_ARGS = ['fp16', 'non-ema']
TEXT_ENCODER_ATTN_MODULE = '.self_attn'
| 414
|
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
| 414
| 1
|
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser('Stable Diffusion script with intel optimization', add_help=False)
parser.add_argument('--dpm', action='store_true', help='Enable DPMSolver or not')
parser.add_argument('--steps', default=None, type=int, help='Num inference steps')
args = parser.parse_args()

device = 'cpu'
prompt = 'a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'
model_id = 'path-to-your-trained-model'
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {'generator': generator}
if args.steps is not None:
    generate_kwargs['num_inference_steps'] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save('generated.png')
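# Note on the warm-up shapes passed as `sample_input` above (descriptive,
# inferred from Stable Diffusion v1 defaults): the UNet sees latents of shape
# (batch, 4, 64, 64) for 512x512 images and CLIP text embeddings of shape
# (batch, 77, 768).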
| 714
|
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
    args = parser.parse_args()
if args.model_type == "bert":
lowerCAmelCase__ = BertForMaskedLM.from_pretrained(args.model_name)
lowerCAmelCase__ = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
lowerCAmelCase__ = model.state_dict()
lowerCAmelCase__ = {}
for w in ["word_embeddings", "position_embeddings"]:
lowerCAmelCase__ = state_dict[F"{prefix}.embeddings.{w}.weight"]
for w in ["weight", "bias"]:
lowerCAmelCase__ = state_dict[F"{prefix}.embeddings.LayerNorm.{w}"]
lowerCAmelCase__ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
lowerCAmelCase__ = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
]
lowerCAmelCase__ = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
]
lowerCAmelCase__ = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
]
lowerCAmelCase__ = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
]
lowerCAmelCase__ = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
]
lowerCAmelCase__ = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
]
lowerCAmelCase__ = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
]
lowerCAmelCase__ = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
]
std_idx += 1
lowerCAmelCase__ = state_dict['cls.predictions.decoder.weight']
lowerCAmelCase__ = state_dict['cls.predictions.bias']
if args.vocab_transform:
for w in ["weight", "bias"]:
lowerCAmelCase__ = state_dict[F"cls.predictions.transform.dense.{w}"]
lowerCAmelCase__ = state_dict[F"cls.predictions.transform.LayerNorm.{w}"]
print(F"N layers selected for distillation: {std_idx}")
print(F"Number of params transferred for distillation: {len(compressed_sd.keys())}")
print(F"Save transferred checkpoint to {args.dump_checkpoint}.")
torch.save(compressed_sd, args.dump_checkpoint)
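# The teacher-to-student layer mapping above (descriptive note): BERT encoder
# layers [0, 2, 4, 7, 9, 11] are copied into DistilBERT layers [0, 1, 2, 3, 4, 5].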
| 576
| 0
|
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class a ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : Optional[Any] = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
a : Union[str, Any] = (
{
'feature-extraction': DistilBertModel,
'fill-mask': DistilBertForMaskedLM,
'question-answering': DistilBertForQuestionAnswering,
'text-classification': DistilBertForSequenceClassification,
'token-classification': DistilBertForTokenClassification,
'zero-shot': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
a : Tuple = True
a : Tuple = True
a : Any = True
a : Optional[Any] = True
def UpperCAmelCase ( self : List[str] ) -> str:
__UpperCAmelCase : Dict = DistilBertModelTester(self )
__UpperCAmelCase : Optional[int] = ConfigTester(self , config_class=__lowercase , dim=37 )
def UpperCAmelCase ( self : Any ) -> Optional[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Any ) -> Any:
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__lowercase )
def UpperCAmelCase ( self : Dict ) -> Tuple:
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__lowercase )
def UpperCAmelCase ( self : int ) -> Any:
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__lowercase )
def UpperCAmelCase ( self : Any ) -> List[Any]:
__UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__lowercase )
def UpperCAmelCase ( self : Tuple ) -> int:
__UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__lowercase )
def UpperCAmelCase ( self : Tuple ) -> Optional[int]:
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__lowercase )
@slow
def UpperCAmelCase ( self : List[Any] ) -> Dict:
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : List[Any] = DistilBertModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
@slow
@require_torch_gpu
def UpperCAmelCase ( self : List[Any] ) -> int:
__UpperCAmelCase , __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
__UpperCAmelCase : List[str] = True
__UpperCAmelCase : Dict = model_class(config=__lowercase )
__UpperCAmelCase : List[Any] = self._prepare_for_class(__lowercase , __lowercase )
__UpperCAmelCase : Dict = torch.jit.trace(
__lowercase , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__lowercase , os.path.join(__lowercase , """traced_model.pt""" ) )
__UpperCAmelCase : int = torch.jit.load(os.path.join(__lowercase , """traced_model.pt""" ) , map_location=__lowercase )
loaded(inputs_dict["""input_ids"""].to(__lowercase ) , inputs_dict["""attention_mask"""].to(__lowercase ) )
@require_torch
class a ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
__UpperCAmelCase : Tuple = DistilBertModel.from_pretrained("""distilbert-base-uncased""" )
__UpperCAmelCase : Optional[int] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
__UpperCAmelCase : List[str] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__UpperCAmelCase : List[str] = model(__lowercase , attention_mask=__lowercase )[0]
__UpperCAmelCase : Dict = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , __lowercase )
__UpperCAmelCase : Dict = torch.tensor(
[[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowercase , atol=1e-4 ) )
| 63
|
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2 as cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
a : List[Any] = True
except ImportError:
a : str = False
try:
from torch.hub import _get_torch_home
a : List[Any] = _get_torch_home()
except ImportError:
a : int = os.path.expanduser(
os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
)
a : Optional[Any] = os.path.join(torch_cache_home, "transformers")
a : Optional[Any] = "https://cdn.huggingface.co"
a : List[str] = "https://s3.amazonaws.com/models.huggingface.co/bert"
a : Any = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
a : Optional[int] = os.path.join(PATH, "config.yaml")
a : Dict = os.path.join(PATH, "attributes.txt")
a : Tuple = os.path.join(PATH, "objects.txt")
a : Dict = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
a : Dict = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
a : Optional[int] = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
a : Any = "pytorch_model.bin"
a : int = "config.yaml"
def lowerCamelCase__ ( __lowerCamelCase : str=OBJECTS , __lowerCamelCase : Union[str, Any]=ATTRIBUTES ):
__UpperCAmelCase : Union[str, Any] = []
with open(__lowerCamelCase ) as f:
for object in f.readlines():
vg_classes.append(object.split(""",""" )[0].lower().strip() )
__UpperCAmelCase : Dict = []
with open(__lowerCamelCase ) as f:
for object in f.readlines():
vg_attrs.append(object.split(""",""" )[0].lower().strip() )
return vg_classes, vg_attrs
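# Illustrative usage of the label loader above (hedged; the file layout is an
# assumption inferred from the parsing): each line is comma-separated and only the
# first field is kept, lowercased and stripped, so a line "person,human" yields "person".
#   vg_classes, vg_attrs = load_labels(OBJECTS, ATTRIBUTES)  # hypothetical call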
def lowerCamelCase__ ( __lowerCamelCase : Any ):
__UpperCAmelCase : List[str] = OrderedDict()
with open(__lowerCamelCase , """rb""" ) as f:
__UpperCAmelCase : int = pkl.load(__lowerCamelCase )["""model"""]
for k in copy.deepcopy(list(ckp.keys() ) ):
__UpperCAmelCase : List[Any] = ckp.pop(__lowerCamelCase )
if isinstance(__lowerCamelCase , np.ndarray ):
__UpperCAmelCase : Union[str, Any] = torch.tensor(__lowerCamelCase )
else:
assert isinstance(__lowerCamelCase , torch.Tensor ), type(__lowerCamelCase )
__UpperCAmelCase : List[str] = v
return r
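# Sketch of the assumed pickle layout handled above (hypothetical path, for
# illustration only): the file holds a dict with a "model" key mapping parameter
# names to numpy arrays, each of which is converted to a torch.Tensor.
#   state = load_checkpoint("frcnn_checkpoint.pkl")  # hypothetical call
#   assert all(isinstance(v, torch.Tensor) for v in state.values())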
class a :
"""simple docstring"""
a : Dict = {}
def __init__( self : Dict , __lowercase : dict , __lowercase : str = "root" , __lowercase : Any=0 ) -> Dict:
__UpperCAmelCase : List[str] = name
__UpperCAmelCase : str = level
__UpperCAmelCase : int = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
__UpperCAmelCase : List[str] = copy.deepcopy(__lowercase )
__UpperCAmelCase : Dict = copy.deepcopy(__lowercase )
if isinstance(__lowercase , __lowercase ):
__UpperCAmelCase : Union[str, Any] = Config(__lowercase , name=__lowercase , level=level + 1 )
__UpperCAmelCase : Union[str, Any] = v
setattr(self , __lowercase , __lowercase )
__UpperCAmelCase : Any = d
def __repr__( self : Optional[Any] ) -> Optional[int]:
return str(list((self._pointer.keys()) ) )
def __setattr__( self : List[str] , __lowercase : List[str] , __lowercase : Tuple ) -> int:
__UpperCAmelCase : int = val
__UpperCAmelCase : List[str] = val
__UpperCAmelCase : Union[str, Any] = key.split(""".""" )
__UpperCAmelCase : List[Any] = len(__lowercase ) - 1
__UpperCAmelCase : List[Any] = self._pointer
if len(__lowercase ) > 1:
for i, l in enumerate(__lowercase ):
if hasattr(self , __lowercase ) and isinstance(getattr(self , __lowercase ) , __lowercase ):
setattr(getattr(self , __lowercase ) , """.""".join(levels[i:] ) , __lowercase )
if l == last_level:
__UpperCAmelCase : Union[str, Any] = val
else:
__UpperCAmelCase : Union[str, Any] = pointer[l]
def UpperCAmelCase ( self : Tuple ) -> Optional[int]:
return self._pointer
def UpperCAmelCase ( self : str , __lowercase : Optional[int] , __lowercase : Any ) -> Optional[int]:
with open(f"""{file_name}""" , """w""" ) as stream:
dump(__lowercase , __lowercase )
def UpperCAmelCase ( self : List[str] , __lowercase : Union[str, Any] , __lowercase : Union[str, Any] ) -> Any:
with open(f"""{file_name}""" , """w""" ) as stream:
json.dump(__lowercase , __lowercase )
@staticmethod
def UpperCAmelCase ( __lowercase : List[Any] ) -> Optional[Any]:
with open(__lowercase ) as stream:
__UpperCAmelCase : Any = load(__lowercase , Loader=__lowercase )
return data
def __str__( self : List[str] ) -> Tuple:
__UpperCAmelCase : Any = """ """
if self._name != "root":
__UpperCAmelCase : Optional[Any] = f"""{t * (self._level-1)}{self._name}:\n"""
else:
__UpperCAmelCase : List[Any] = """"""
__UpperCAmelCase : Optional[Any] = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(__lowercase , __lowercase ):
r += f"""{t * (self._level)}{v}\n"""
self._level += 1
else:
r += f"""{t * (self._level)}{k}: {v} ({type(__lowercase ).__name__})\n"""
__UpperCAmelCase : int = level
return r[:-1]
@classmethod
def UpperCAmelCase ( cls : List[str] , __lowercase : str , **__lowercase : Any ) -> Any:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = cls.get_config_dict(__lowercase , **__lowercase )
return cls(__lowercase )
@classmethod
def UpperCAmelCase ( cls : Dict , __lowercase : str , **__lowercase : Union[str, Any] ) -> Optional[int]:
__UpperCAmelCase : int = kwargs.pop("""cache_dir""" , __lowercase )
__UpperCAmelCase : int = kwargs.pop("""force_download""" , __lowercase )
__UpperCAmelCase : str = kwargs.pop("""resume_download""" , __lowercase )
__UpperCAmelCase : Dict = kwargs.pop("""proxies""" , __lowercase )
__UpperCAmelCase : Union[str, Any] = kwargs.pop("""local_files_only""" , __lowercase )
if os.path.isdir(__lowercase ):
__UpperCAmelCase : List[Any] = os.path.join(__lowercase , __lowercase )
elif os.path.isfile(__lowercase ) or is_remote_url(__lowercase ):
__UpperCAmelCase : Tuple = pretrained_model_name_or_path
else:
__UpperCAmelCase : Optional[int] = hf_bucket_url(__lowercase , filename=__lowercase , use_cdn=__lowercase )
try:
# Load from URL or cache if already cached
__UpperCAmelCase : Optional[int] = cached_path(
__lowercase , cache_dir=__lowercase , force_download=__lowercase , proxies=__lowercase , resume_download=__lowercase , local_files_only=__lowercase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__UpperCAmelCase : Optional[int] = Config.load_yaml(__lowercase )
except EnvironmentError:
__UpperCAmelCase : str = """Can't load config for"""
raise EnvironmentError(__lowercase )
if resolved_config_file == config_file:
print("""loading configuration file from path""" )
else:
print("""loading configuration file cache""" )
return Config.load_yaml(__lowercase ), kwargs
def lowerCamelCase__ ( __lowerCamelCase : Dict ):
__UpperCAmelCase : Optional[int] = torch.load("""dump.pt""" , map_location=in_tensor.device )
__UpperCAmelCase : Tuple = in_tensor.numpy()
__UpperCAmelCase : Optional[int] = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(nb.shape , nb[0, 0, :5] )
assert np.allclose(na , nb , rtol=0.0_1 , atol=0.1 ), (
f"""{sum([1 for x in np.isclose(na , nb , rtol=0.0_1 , atol=0.1 ).flatten() if not x] )/len(na.flatten() )*100:.4f} %"""
" element-wise mismatch"
)
raise Exception("""tensors are all good""" )
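# Debug-usage note for the checker above (a sketch of intent, not an API contract):
# dump a reference tensor to "dump.pt", then call with the live tensor. An
# AssertionError reports the element-wise mismatch percentage; reaching the final
# raise means the tensors matched within rtol=0.01 / atol=0.1.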
# Hugging face functions below
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] ):
__UpperCAmelCase : Tuple = urlparse(__lowerCamelCase )
return parsed.scheme in ("http", "https")
def lowerCamelCase__ ( __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : int=True ):
__UpperCAmelCase : int = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
__UpperCAmelCase : Optional[int] = """/""" not in model_id
if legacy_format:
return f"""{endpoint}/{model_id}-{filename}"""
else:
return f"""{endpoint}/{model_id}/{filename}"""
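# Hedged examples of the URL shapes produced by the helper above (values assumed
# for illustration only):
#   helper("bert-base-uncased", "pytorch_model.bin")
#     -> "https://cdn.huggingface.co/bert-base-uncased-pytorch_model.bin"   # legacy flat layout, no "/"
#   helper("org/model", "pytorch_model.bin", False)
#     -> "https://s3.amazonaws.com/models.huggingface.co/bert/org/model/pytorch_model.bin"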
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple=None , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : Optional[int]=None , ):
__UpperCAmelCase : Optional[int] = """python/{}""".format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
ua += "; " + "; ".join("""{}/{}""".format(__lowerCamelCase , __lowerCamelCase ) for k, v in user_agent.items() )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
ua += "; " + user_agent
__UpperCAmelCase : List[str] = {"""user-agent""": ua}
if resume_size > 0:
__UpperCAmelCase : Union[str, Any] = """bytes=%d-""" % (resume_size,)
__UpperCAmelCase : Union[str, Any] = requests.get(__lowerCamelCase , stream=__lowerCamelCase , proxies=__lowerCamelCase , headers=__lowerCamelCase )
if response.status_code == 416: # Range not satisfiable
return
__UpperCAmelCase : List[str] = response.headers.get("""Content-Length""" )
__UpperCAmelCase : str = resume_size + int(__lowerCamelCase ) if content_length is not None else None
__UpperCAmelCase : List[Any] = tqdm(
unit="""B""" , unit_scale=__lowerCamelCase , total=__lowerCamelCase , initial=__lowerCamelCase , desc="""Downloading""" , )
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(__lowerCamelCase ) )
temp_file.write(__lowerCamelCase )
progress.close()
def lowerCamelCase__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : str=10 , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Dict=None , __lowerCamelCase : List[str]=False , ):
if cache_dir is None:
__UpperCAmelCase : Optional[Any] = TRANSFORMERS_CACHE
if isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : List[str] = str(__lowerCamelCase )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
__UpperCAmelCase : List[Any] = None
if not local_files_only:
try:
__UpperCAmelCase : Optional[Any] = requests.head(__lowerCamelCase , allow_redirects=__lowerCamelCase , proxies=__lowerCamelCase , timeout=__lowerCamelCase )
if response.status_code == 200:
__UpperCAmelCase : Dict = response.headers.get("""ETag""" )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
__UpperCAmelCase : List[str] = url_to_filename(__lowerCamelCase , __lowerCamelCase )
# get cache path to put the file
__UpperCAmelCase : Optional[int] = os.path.join(__lowerCamelCase , __lowerCamelCase )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(__lowerCamelCase ):
return cache_path
else:
__UpperCAmelCase : List[Any] = [
file
for file in fnmatch.filter(os.listdir(__lowerCamelCase ) , filename + """.*""" )
if not file.endswith(""".json""" ) and not file.endswith(""".lock""" )
]
if len(__lowerCamelCase ) > 0:
return os.path.join(__lowerCamelCase , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"""Cannot find the requested files in the cached path and outgoing traffic has been"""
""" disabled. To enable model look-ups and downloads online, set 'local_files_only'"""
""" to False.""" )
return None
# From now on, etag is not None.
if os.path.exists(__lowerCamelCase ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
__UpperCAmelCase : str = cache_path + """.lock"""
with FileLock(__lowerCamelCase ):
# If the download just completed while the lock was activated.
if os.path.exists(__lowerCamelCase ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
__UpperCAmelCase : int = cache_path + """.incomplete"""
@contextmanager
def _resumable_file_manager():
with open(__lowerCamelCase , """a+b""" ) as f:
yield f
__UpperCAmelCase : str = _resumable_file_manager
if os.path.exists(__lowerCamelCase ):
__UpperCAmelCase : List[Any] = os.stat(__lowerCamelCase ).st_size
else:
__UpperCAmelCase : List[Any] = 0
else:
__UpperCAmelCase : str = partial(tempfile.NamedTemporaryFile , dir=__lowerCamelCase , delete=__lowerCamelCase )
__UpperCAmelCase : Optional[int] = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
"""%s not found in cache or force_download set to True, downloading to %s""" , __lowerCamelCase , temp_file.name , )
http_get(
__lowerCamelCase , __lowerCamelCase , proxies=__lowerCamelCase , resume_size=__lowerCamelCase , user_agent=__lowerCamelCase , )
os.replace(temp_file.name , __lowerCamelCase )
__UpperCAmelCase : Any = {"""url""": url, """etag""": etag}
__UpperCAmelCase : Union[str, Any] = cache_path + """.json"""
with open(__lowerCamelCase , """w""" ) as meta_file:
json.dump(__lowerCamelCase , __lowerCamelCase )
return cache_path
def lowerCamelCase__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any]=None ):
__UpperCAmelCase : Tuple = url.encode("""utf-8""" )
__UpperCAmelCase : Optional[Any] = sha256(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = url_hash.hexdigest()
if etag:
__UpperCAmelCase : int = etag.encode("""utf-8""" )
__UpperCAmelCase : List[str] = sha256(__lowerCamelCase )
filename += "." + etag_hash.hexdigest()
if url.endswith(""".h5""" ):
filename += ".h5"
return filename
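# Cache-key sketch for the helper above (illustrative): the filename is
# sha256(url).hexdigest(), plus "." + sha256(etag).hexdigest() when an etag exists,
# plus a preserved ".h5" suffix so TF weight files keep their extension.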
def lowerCamelCase__ ( __lowerCamelCase : Any , __lowerCamelCase : int=None , __lowerCamelCase : int=False , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Tuple=False , __lowerCamelCase : str=None , __lowerCamelCase : List[Any]=False , __lowerCamelCase : Tuple=False , __lowerCamelCase : str=False , ):
if cache_dir is None:
__UpperCAmelCase : List[str] = TRANSFORMERS_CACHE
if isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : Any = str(__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : Tuple = str(__lowerCamelCase )
if is_remote_url(__lowerCamelCase ):
# URL, so get it from the cache (downloading if necessary)
__UpperCAmelCase : Tuple = get_from_cache(
__lowerCamelCase , cache_dir=__lowerCamelCase , force_download=__lowerCamelCase , proxies=__lowerCamelCase , resume_download=__lowerCamelCase , user_agent=__lowerCamelCase , local_files_only=__lowerCamelCase , )
elif os.path.exists(__lowerCamelCase ):
# File, and it exists.
__UpperCAmelCase : Tuple = url_or_filename
elif urlparse(__lowerCamelCase ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("""file {} not found""".format(__lowerCamelCase ) )
else:
# Something unknown
raise ValueError("""unable to parse {} as a URL or as a local path""".format(__lowerCamelCase ) )
if extract_compressed_file:
if not is_zipfile(__lowerCamelCase ) and not tarfile.is_tarfile(__lowerCamelCase ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
__UpperCAmelCase , __UpperCAmelCase : int = os.path.split(__lowerCamelCase )
__UpperCAmelCase : Any = output_file.replace(""".""" , """-""" ) + """-extracted"""
__UpperCAmelCase : List[str] = os.path.join(__lowerCamelCase , __lowerCamelCase )
if os.path.isdir(__lowerCamelCase ) and os.listdir(__lowerCamelCase ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
__UpperCAmelCase : str = output_path + """.lock"""
with FileLock(__lowerCamelCase ):
shutil.rmtree(__lowerCamelCase , ignore_errors=__lowerCamelCase )
os.makedirs(__lowerCamelCase )
if is_zipfile(__lowerCamelCase ):
with ZipFile(__lowerCamelCase , """r""" ) as zip_file:
zip_file.extractall(__lowerCamelCase )
zip_file.close()
elif tarfile.is_tarfile(__lowerCamelCase ):
__UpperCAmelCase : Any = tarfile.open(__lowerCamelCase )
tar_file.extractall(__lowerCamelCase )
tar_file.close()
else:
raise EnvironmentError("""Archive format of {} could not be identified""".format(__lowerCamelCase ) )
return output_path_extracted
return output_path
def lowerCamelCase__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : int="," ):
assert isinstance(__lowerCamelCase , __lowerCamelCase )
if os.path.isfile(__lowerCamelCase ):
with open(__lowerCamelCase ) as f:
__UpperCAmelCase : List[Any] = eval(f.read() )
else:
__UpperCAmelCase : List[str] = requests.get(__lowerCamelCase )
try:
__UpperCAmelCase : int = req.json()
except Exception:
__UpperCAmelCase : List[Any] = req.content.decode()
assert data is not None, "could not connect"
try:
__UpperCAmelCase : str = eval(__lowerCamelCase )
except Exception:
__UpperCAmelCase : List[Any] = data.split("""\n""" )
req.close()
return data
def lowerCamelCase__ ( __lowerCamelCase : Any ):
__UpperCAmelCase : Optional[int] = requests.get(__lowerCamelCase )
__UpperCAmelCase : List[Any] = np.array(Image.open(BytesIO(response.content ) ) )
return img
def lowerCamelCase__ ( __lowerCamelCase : str ):
__UpperCAmelCase : int = url.split("""/""" )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(__lowerCamelCase )
with open(__lowerCamelCase , """rb""" ) as stream:
__UpperCAmelCase : List[str] = pkl.load(__lowerCamelCase )
__UpperCAmelCase : Dict = weights.pop("""model""" )
__UpperCAmelCase : Union[str, Any] = {}
for k, v in model.items():
__UpperCAmelCase : int = torch.from_numpy(__lowerCamelCase )
if "running_var" in k:
__UpperCAmelCase : Optional[int] = torch.tensor([0] )
__UpperCAmelCase : Tuple = k.replace("""running_var""" , """num_batches_tracked""" )
__UpperCAmelCase : Any = zero
return new
def lowerCamelCase__ ( ):
print(f"""{os.path.abspath(os.path.join(PATH , os.pardir ) )}/demo.ipynb""" )
def lowerCamelCase__ ( __lowerCamelCase : Any , __lowerCamelCase : List[Any]="RGB" ):
assert isinstance(__lowerCamelCase , __lowerCamelCase )
if os.path.isfile(__lowerCamelCase ):
__UpperCAmelCase : List[str] = cva.imread(__lowerCamelCase )
else:
__UpperCAmelCase : int = get_image_from_url(__lowerCamelCase )
assert img is not None, f"""could not connect to: {im}"""
__UpperCAmelCase : Any = cva.cvtColor(__lowerCamelCase , cva.COLOR_BGR2RGB )
if input_format == "RGB":
__UpperCAmelCase : Optional[int] = img[:, :, ::-1]
return img
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : int=1 ):
return (images[i : i + batch] for i in range(0 , len(__lowerCamelCase ) , __lowerCamelCase ))
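# Usage sketch for the batching generator above (hypothetical inputs): it yields
# consecutive slices of up to `batch` images, e.g. a list of 10 images with
# batch=4 yields slices of sizes 4, 4 and 2.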
| 63
| 1
|
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''):
UpperCAmelCase = {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
UpperCAmelCase = {
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
lowercase = (images / 2 + 0.5).clamp(0 , 1 )
lowercase = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowercase = numpy_to_pil(images )
return images
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
if images.ndim == 3:
lowercase = images[None, ...]
lowercase = (images * 255).round().astype('uint8' )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
lowercase = [Image.fromarray(image.squeeze() , mode='L' ) for image in images]
else:
lowercase = [Image.fromarray(UpperCamelCase__ ) for image in images]
return pil_images
| 720
|
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
UpperCAmelCase = logging.getLogger(__name__)
class A_ ( __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : int = """token-classification"""
def __init__( self , snake_case ):
if type(snake_case ) == dict:
lowercase = Namespace(**snake_case )
lowercase = import_module('tasks' )
try:
lowercase = getattr(snake_case , hparams.task_type )
lowercase = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
lowercase = self.token_classification_task.get_labels(hparams.labels )
lowercase = CrossEntropyLoss().ignore_index
super().__init__(snake_case , len(self.labels ) , self.mode )
def SCREAMING_SNAKE_CASE__ ( self , **snake_case ):
return self.model(**snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
lowercase = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type != "distilbert":
lowercase = (
batch[2] if self.config.model_type in ['bert', 'xlnet'] else None
) # XLM and RoBERTa don"t use token_type_ids
lowercase = self(**snake_case )
lowercase = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.hparams
for mode in ["train", "dev", "test"]:
lowercase = self._feature_file(snake_case )
if os.path.exists(snake_case ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , snake_case )
lowercase = torch.load(snake_case )
else:
logger.info('Creating features from dataset file at %s' , args.data_dir )
lowercase = self.token_classification_task.read_examples_from_file(args.data_dir , snake_case )
lowercase = self.token_classification_task.convert_examples_to_features(
snake_case , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['xlnet'] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['xlnet'] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=snake_case , pad_on_left=bool(self.config.model_type in ['xlnet'] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info('Saving features into cached file %s' , snake_case )
torch.save(snake_case , snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case = False ):
lowercase = self._feature_file(snake_case )
logger.info('Loading features from cached file %s' , snake_case )
lowercase = torch.load(snake_case )
lowercase = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowercase = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
lowercase = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
lowercase = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
lowercase = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(snake_case , snake_case , snake_case , snake_case ) , batch_size=snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
"""Compute validation""" ""
lowercase = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type != "distilbert":
lowercase = (
batch[2] if self.config.model_type in ['bert', 'xlnet'] else None
) # XLM and RoBERTa don"t use token_type_ids
lowercase = self(**snake_case )
lowercase , lowercase = outputs[:2]
lowercase = logits.detach().cpu().numpy()
lowercase = inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase = torch.stack([x['val_loss'] for x in outputs] ).mean()
lowercase = np.concatenate([x['pred'] for x in outputs] , axis=0 )
lowercase = np.argmax(snake_case , axis=2 )
lowercase = np.concatenate([x['target'] for x in outputs] , axis=0 )
lowercase = dict(enumerate(self.labels ) )
lowercase = [[] for _ in range(out_label_ids.shape[0] )]
lowercase = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
lowercase = {
'val_loss': val_loss_mean,
'accuracy_score': accuracy_score(snake_case , snake_case ),
'precision': precision_score(snake_case , snake_case ),
'recall': recall_score(snake_case , snake_case ),
'f1': f1_score(snake_case , snake_case ),
}
lowercase = dict(results.items() )
lowercase = results
return ret, preds_list, out_label_list
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
# when stable
lowercase , lowercase , lowercase = self._eval_end(snake_case )
lowercase = ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
# updating to test_epoch_end instead of deprecated test_end
lowercase , lowercase , lowercase = self._eval_end(snake_case )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
lowercase = ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case , snake_case ):
# Add NER specific options
BaseTransformer.add_model_specific_args(snake_case , snake_case )
parser.add_argument(
'--task_type' , default='NER' , type=snake_case , help='Task type to fine tune in training (e.g. NER, POS, etc)' )
parser.add_argument(
'--max_seq_length' , default=128 , type=snake_case , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--labels' , default='' , type=snake_case , help='Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.' , )
parser.add_argument(
'--gpus' , default=0 , type=snake_case , help='The number of GPUs allocated for this, it is by default 0 meaning none' , )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
return parser
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
UpperCAmelCase = NERTransformer.add_model_specific_args(parser, os.getcwd())
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = NERTransformer(args)
UpperCAmelCase = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
UpperCAmelCase = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True))
UpperCAmelCase = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 565
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Any = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class _lowerCamelCase( _snake_case ):
lowercase_ : Dict = """gpt_bigcode"""
lowercase_ : str = ["""past_key_values"""]
lowercase_ : Tuple = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self, lowerCamelCase=5_02_57, lowerCamelCase=10_24, lowerCamelCase=7_68, lowerCamelCase=12, lowerCamelCase=12, lowerCamelCase=None, lowerCamelCase="gelu_pytorch_tanh", lowerCamelCase=0.1, lowerCamelCase=0.1, lowerCamelCase=0.1, lowerCamelCase=1E-5, lowerCamelCase=0.0_2, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=5_02_56, lowerCamelCase=5_02_56, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=True, **lowerCamelCase, ) -> Optional[Any]:
"""simple docstring"""
_lowercase : Optional[Any] = vocab_size
_lowercase : Optional[Any] = n_positions
_lowercase : List[Any] = n_embd
_lowercase : Union[str, Any] = n_layer
_lowercase : Union[str, Any] = n_head
_lowercase : Optional[int] = n_inner
_lowercase : Optional[Any] = activation_function
_lowercase : Union[str, Any] = resid_pdrop
_lowercase : Union[str, Any] = embd_pdrop
_lowercase : List[Any] = attn_pdrop
_lowercase : Optional[int] = layer_norm_epsilon
_lowercase : Any = initializer_range
_lowercase : str = scale_attn_weights
_lowercase : Tuple = use_cache
_lowercase : Optional[int] = attention_softmax_in_fpaa
_lowercase : List[Any] = scale_attention_softmax_in_fpaa
_lowercase : Any = multi_query
_lowercase : Optional[int] = bos_token_id
_lowercase : Optional[int] = eos_token_id
super().__init__(bos_token_id=__snake_case, eos_token_id=__snake_case, **__snake_case)
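# Construction sketch (hedged; attribute names follow the mapping defined above,
# resolved through PretrainedConfig's standard attribute_map behavior):
#   cfg = _lowerCamelCase(n_layer=24, n_head=16, n_embd=2048, multi_query=True)
#   cfg.hidden_size == cfg.n_embd  # "hidden_size" is an alias for "n_embd"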
| 89
|
'''simple docstring'''
import argparse
import datetime
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = {
"""0""": """Sunday""",
"""1""": """Monday""",
"""2""": """Tuesday""",
"""3""": """Wednesday""",
"""4""": """Thursday""",
"""5""": """Friday""",
"""6""": """Saturday""",
}
_SCREAMING_SNAKE_CASE : Optional[int] = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
# Validate
if not 0 < len(SCREAMING_SNAKE_CASE__ ) < 11:
raise ValueError("""Must be 10 characters long""" )
# Get month
_SCREAMING_SNAKE_CASE : int = int(date_input[0] + date_input[1] )
# Validate
if not 0 < m < 13:
raise ValueError("""Month must be between 1 - 12""" )
_SCREAMING_SNAKE_CASE : str = date_input[2]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("""Date separator must be '-' or '/'""" )
# Get day
_SCREAMING_SNAKE_CASE : int = int(date_input[3] + date_input[4] )
# Validate
if not 0 < d < 32:
raise ValueError("""Date must be between 1 - 31""" )
# Get second separator
_SCREAMING_SNAKE_CASE : str = date_input[5]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("""Date separator must be '-' or '/'""" )
# Get year
_SCREAMING_SNAKE_CASE : int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
# Arbitrary year range
if not 45 < y < 8500:
raise ValueError(
"""Year out of range. There has to be some sort of limit...right?""" )
# Get datetime obj for validation
_SCREAMING_SNAKE_CASE : Optional[int] = datetime.date(int(SCREAMING_SNAKE_CASE__ ) , int(SCREAMING_SNAKE_CASE__ ) , int(SCREAMING_SNAKE_CASE__ ) )
# Start math
if m <= 2:
_SCREAMING_SNAKE_CASE : Union[str, Any] = y - 1
_SCREAMING_SNAKE_CASE : Optional[int] = m + 12
# maths var
_SCREAMING_SNAKE_CASE : int = int(str(SCREAMING_SNAKE_CASE__ )[:2] )
_SCREAMING_SNAKE_CASE : int = int(str(SCREAMING_SNAKE_CASE__ )[2:] )
_SCREAMING_SNAKE_CASE : int = int(2.6 * m - 5.3_9 )
_SCREAMING_SNAKE_CASE : int = int(c / 4 )
_SCREAMING_SNAKE_CASE : int = int(k / 4 )
_SCREAMING_SNAKE_CASE : int = int(d + k )
_SCREAMING_SNAKE_CASE : int = int(t + u + v + x )
_SCREAMING_SNAKE_CASE : int = int(z - (2 * c) )
_SCREAMING_SNAKE_CASE : int = round(w % 7 )
# End math
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError("""The date was evaluated incorrectly. Contact developer.""" )
# Response
_SCREAMING_SNAKE_CASE : str = f"""Your date {date_input}, is a {days[str(SCREAMING_SNAKE_CASE__ )]}!"""
return response
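# Worked example of the Zeller arithmetic above (values traced by hand): for
# "01-31-2010", m <= 2 shifts to m=13, y=2009, so c=20, k=9, t=int(2.6*13-5.39)=28,
# u=5, v=2, x=31+9=40, z=75, w=75-40=35 and f=35%7=0 -> "Sunday", which matches
# datetime.date(2010, 1, 31).weekday() via the convert_datetime_days mapping.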
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : Tuple = argparse.ArgumentParser(
description=(
'Find out what day of the week nearly any date is or was. Enter '
'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
)
)
parser.add_argument(
'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
)
UpperCAmelCase_ : List[Any] = parser.parse_args()
zeller(args.date_input)
| 533
| 0
|
'''simple docstring'''
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
UpperCAmelCase = get_logger(__name__)
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ):
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
with FSDP.state_dict_type(
__SCREAMING_SNAKE_CASE , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
lowercase = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
lowercase = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
lowercase = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if accelerator.process_index == 0:
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
lowercase = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
lowercase = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
lowercase = os.path.join(__SCREAMING_SNAKE_CASE , F'''{MODEL_NAME}_{model_index}''' )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
logger.info(F'''Saving model to {ckpt_dir}''' )
lowercase = {'model': state_dict}
dist_cp.save_state_dict(
state_dict=__SCREAMING_SNAKE_CASE , storage_writer=dist_cp.FileSystemWriter(__SCREAMING_SNAKE_CASE ) , planner=DefaultSavePlanner() , )
logger.info(F'''Model saved to {ckpt_dir}''' )
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ):
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
__SCREAMING_SNAKE_CASE , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(__SCREAMING_SNAKE_CASE ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
'Set the `sync_module_states` flag to `True` so that model states are synced across processes when '
'initializing FSDP object' )
return
lowercase = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
lowercase = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
logger.info(F'''Loading model from {input_model_file}''' )
lowercase = torch.load(__SCREAMING_SNAKE_CASE )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
lowercase = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
lowercase = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
logger.info(F'''Loading model from {input_model_file}''' )
lowercase = torch.load(__SCREAMING_SNAKE_CASE )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
lowercase = (
os.path.join(__SCREAMING_SNAKE_CASE , F'''{MODEL_NAME}_{model_index}''' )
if F'''{MODEL_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading model from {ckpt_dir}''' )
lowercase = {'model': model.state_dict()}
dist_cp.load_state_dict(
state_dict=__SCREAMING_SNAKE_CASE , storage_reader=dist_cp.FileSystemReader(__SCREAMING_SNAKE_CASE ) , planner=DefaultLoadPlanner() , )
lowercase = state_dict['model']
logger.info(F'''Model loaded from {ckpt_dir}''' )
model.load_state_dict(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ):
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
with FSDP.state_dict_type(
__SCREAMING_SNAKE_CASE , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
lowercase = FSDP.optim_state_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
lowercase = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
lowercase = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
logger.info(F'''Saving Optimizer state to {output_optimizer_file}''' )
torch.save(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
logger.info(F'''Optimizer state saved in {output_optimizer_file}''' )
else:
lowercase = os.path.join(__SCREAMING_SNAKE_CASE , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
logger.info(F'''Saving Optimizer state to {ckpt_dir}''' )
dist_cp.save_state_dict(
state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(__SCREAMING_SNAKE_CASE ) , planner=DefaultSavePlanner() , )
logger.info(F'''Optimizer state saved in {ckpt_dir}''' )
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ):
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
__SCREAMING_SNAKE_CASE , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
lowercase = None
# below check should work but currently it isn't working (mostly opytorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
lowercase = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
lowercase = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
logger.info(F'''Loading Optimizer state from {input_optimizer_file}''' )
lowercase = torch.load(__SCREAMING_SNAKE_CASE )
logger.info(F'''Optimizer state loaded from {input_optimizer_file}''' )
else:
lowercase = (
os.path.join(__SCREAMING_SNAKE_CASE , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
if F'''{OPTIMIZER_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading Optimizer from {ckpt_dir}''' )
lowercase = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key='optimizer' , storage_reader=dist_cp.FileSystemReader(__SCREAMING_SNAKE_CASE ) , )
lowercase = optim_state['optimizer']
logger.info(F'''Optimizer loaded from {ckpt_dir}''' )
lowercase = FSDP.optim_state_dict_to_load(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
optimizer.load_state_dict(__SCREAMING_SNAKE_CASE )
| 706
|
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {'''vocab_file''': '''spiece.model'''}
UpperCAmelCase = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
}
}
# TODO(PVP) - this should be removed in Transformers v5
UpperCAmelCase = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
UpperCAmelCase = '''▁'''
class A_ ( __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : List[Any] = VOCAB_FILES_NAMES
_UpperCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : List[str] = ["""input_ids""", """attention_mask"""]
def __init__( self , snake_case , snake_case="</s>" , snake_case="<unk>" , snake_case="<pad>" , snake_case=100 , snake_case=None , snake_case = None , snake_case=True , **snake_case , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
lowercase = [F'''<extra_id_{i}>''' for i in range(snake_case )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
lowercase = len(set(filter(lambda snake_case : bool('extra_id' in str(snake_case ) ) , snake_case ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
' tokens' )
if legacy:
logger.warning_once(
F'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'''
' read the related pull request available at https://github.com/huggingface/transformers/pull/24565' )
lowercase = legacy
lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=snake_case , unk_token=snake_case , pad_token=snake_case , extra_ids=snake_case , additional_special_tokens=snake_case , sp_model_kwargs=self.sp_model_kwargs , legacy=snake_case , **snake_case , )
lowercase = vocab_file
lowercase = extra_ids
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case )
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case , snake_case , snake_case ):
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
lowercase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
F''' {pretrained_model_name_or_path} automatically truncating your input to'''
F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
' instantiate this tokenizer with `model_max_length` set to your preferred value.' , snake_case , )
return max_model_length
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return self.sp_model.get_piece_size() + self._extra_ids
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = {self.convert_ids_to_tokens(snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None , snake_case = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(snake_case )) + [1]
return ([0] * len(snake_case )) + [1] + ([0] * len(snake_case )) + [1]
def SCREAMING_SNAKE_CASE__ ( self ):
return list(
set(filter(lambda snake_case : bool(re.search(r'<extra_id_\d+>' , snake_case ) ) is not None , self.additional_special_tokens ) ) )
def SCREAMING_SNAKE_CASE__ ( self ):
return [self._convert_token_to_id(snake_case ) for token in self.get_sentinel_tokens()]
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
if len(snake_case ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
' eos tokens being added.' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ):
lowercase = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ):
lowercase = self._add_eos_if_not_present(snake_case )
if token_ids_a is None:
return token_ids_a
else:
lowercase = self._add_eos_if_not_present(snake_case )
return token_ids_a + token_ids_a
def __getstate__( self ):
lowercase = self.__dict__.copy()
lowercase = None
return state
def __setstate__( self , snake_case ):
lowercase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowercase = {}
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , **snake_case ):
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
lowercase = SPIECE_UNDERLINE + text.replace(snake_case , ' ' )
return super().tokenize(snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , **snake_case ):
if not self.legacy:
lowercase = text.startswith(snake_case )
if is_first:
lowercase = text[1:]
lowercase = self.sp_model.encode(snake_case , out_type=snake_case )
if not self.legacy and not is_first and not text.startswith(' ' ) and tokens[0].startswith(snake_case ):
lowercase = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
if token.startswith('<extra_id_' ):
lowercase = re.match(r'<extra_id_(\d+)>' , snake_case )
lowercase = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(snake_case )
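# Mapping sketch (standard T5 sentinel convention, as implemented above): with
# extra_ids=100, "<extra_id_0>" resolves to vocab_size - 1 and "<extra_id_99>"
# to vocab_size - 100; all other pieces fall through to sentencepiece.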
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
if index < self.sp_model.get_piece_size():
lowercase = self.sp_model.IdToPiece(snake_case )
else:
lowercase = F'''<extra_id_{self.vocab_size - 1 - index}>'''
return token
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase = []
lowercase = ''
lowercase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case ) + token
lowercase = True
lowercase = []
else:
current_sub_tokens.append(snake_case )
lowercase = False
out_string += self.sp_model.decode(snake_case )
return out_string.strip()
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ):
if not os.path.isdir(snake_case ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase = os.path.join(
snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case , 'wb' ) as fi:
lowercase = self.sp_model.serialized_model_proto()
fi.write(snake_case )
return (out_vocab_file,)
| 565
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : int = logging.get_logger(__name__)
snake_case_ : Optional[int] = {
'''MIT/ast-finetuned-audioset-10-10-0.4593''': (
'''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
),
}
class A_ ( lowerCAmelCase_ ):
'''simple docstring'''
_lowerCAmelCase = """audio-spectrogram-transformer"""
def __init__( self , A_=7_68 , A_=12 , A_=12 , A_=30_72 , A_="gelu" , A_=0.0 , A_=0.0 , A_=0.02 , A_=1e-1_2 , A_=16 , A_=True , A_=10 , A_=10 , A_=10_24 , A_=1_28 , **A_ , ):
super().__init__(**A_ )
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = patch_size
_UpperCamelCase = qkv_bias
_UpperCamelCase = frequency_stride
_UpperCamelCase = time_stride
_UpperCamelCase = max_length
_UpperCamelCase = num_mel_bins
| 138
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    """Loads train/dev/test CSV files into tf.data datasets keyed by dataset split."""
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
snake_case_ : Tuple = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: Optional[str] = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
            results.update(result)

    return results
if __name__ == "__main__":
main()
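# Example invocation (illustrative; the flag names come from the dataclasses
# above, while the script name, checkpoint, and CSV paths are placeholders):
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-uncased \
#       --train_file train.csv --dev_file dev.csv --label_column_id 0 \
#       --output_dir ./out --do_train --do_eval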
| 138
| 1
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }

        config.update(**kwargs)
        return config
def A ( self , __lowerCAmelCase=0 , **__lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Dict = dict(self.forward_default_kwargs )
__magic_name__ :Optional[int] = kwargs.pop('''num_inference_steps''' , __lowerCAmelCase )
__magic_name__ :Optional[Any] = self.dummy_sample
__magic_name__ :Any = 0.1 * sample
__magic_name__ :List[str] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__magic_name__ :Optional[Any] = self.get_scheduler_config(**__lowerCAmelCase )
__magic_name__ :Tuple = scheduler_class(**__lowerCAmelCase )
scheduler.set_timesteps(__lowerCAmelCase )
# copy over dummy past residuals
__magic_name__ :str = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowerCAmelCase )
__magic_name__ :Optional[Any] = scheduler_class.from_pretrained(__lowerCAmelCase )
new_scheduler.set_timesteps(__lowerCAmelCase )
# copy over dummy past residuals
__magic_name__ :Any = dummy_past_residuals[: new_scheduler.config.solver_order]
__magic_name__ , __magic_name__ :Any = sample, sample
for t in range(__lowerCAmelCase , time_step + scheduler.config.solver_order + 1 ):
__magic_name__ :str = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
__magic_name__ :Union[str, Any] = new_scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def A ( self , __lowerCAmelCase=0 , **__lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Dict = dict(self.forward_default_kwargs )
__magic_name__ :List[Any] = kwargs.pop('''num_inference_steps''' , __lowerCAmelCase )
__magic_name__ :Tuple = self.dummy_sample
__magic_name__ :Optional[int] = 0.1 * sample
__magic_name__ :List[str] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__magic_name__ :List[str] = self.get_scheduler_config()
__magic_name__ :Optional[int] = scheduler_class(**__lowerCAmelCase )
scheduler.set_timesteps(__lowerCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
__magic_name__ :Union[str, Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowerCAmelCase )
__magic_name__ :Tuple = scheduler_class.from_pretrained(__lowerCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__lowerCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
__magic_name__ :int = dummy_past_residuals[: new_scheduler.config.solver_order]
__magic_name__ :Optional[Any] = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
__magic_name__ :str = new_scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def A ( self , __lowerCAmelCase=None , **__lowerCAmelCase ):
"""simple docstring"""
if scheduler is None:
__magic_name__ :Tuple = self.scheduler_classes[0]
__magic_name__ :int = self.get_scheduler_config(**__lowerCAmelCase )
__magic_name__ :Union[str, Any] = scheduler_class(**__lowerCAmelCase )
__magic_name__ :int = self.scheduler_classes[0]
__magic_name__ :Dict = self.get_scheduler_config(**__lowerCAmelCase )
__magic_name__ :Union[str, Any] = scheduler_class(**__lowerCAmelCase )
__magic_name__ :int = 1_0
__magic_name__ :Optional[Any] = self.dummy_model()
__magic_name__ :Tuple = self.dummy_sample_deter
scheduler.set_timesteps(__lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
__magic_name__ :int = model(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :List[Any] = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample
return sample
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = dict(self.forward_default_kwargs )
__magic_name__ :Optional[Any] = kwargs.pop('''num_inference_steps''' , __lowerCAmelCase )
for scheduler_class in self.scheduler_classes:
__magic_name__ :Optional[int] = self.get_scheduler_config()
__magic_name__ :Dict = scheduler_class(**__lowerCAmelCase )
__magic_name__ :Union[str, Any] = self.dummy_sample
__magic_name__ :Tuple = 0.1 * sample
if num_inference_steps is not None and hasattr(__lowerCAmelCase , '''set_timesteps''' ):
scheduler.set_timesteps(__lowerCAmelCase )
elif num_inference_steps is not None and not hasattr(__lowerCAmelCase , '''set_timesteps''' ):
__magic_name__ :Tuple = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__magic_name__ :List[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
__magic_name__ :Tuple = dummy_past_residuals[: scheduler.config.solver_order]
__magic_name__ :str = scheduler.timesteps[5]
__magic_name__ :Dict = scheduler.timesteps[6]
__magic_name__ :List[Any] = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
__magic_name__ :str = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def A ( self ):
"""simple docstring"""
# make sure that iterating over schedulers with same config names gives same results
# for defaults
__magic_name__ :List[Any] = UniPCMultistepScheduler(**self.get_scheduler_config() )
__magic_name__ :str = self.full_loop(scheduler=__lowerCAmelCase )
__magic_name__ :Any = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1E-3
__magic_name__ :Dict = DPMSolverSinglestepScheduler.from_config(scheduler.config )
__magic_name__ :str = DEISMultistepScheduler.from_config(scheduler.config )
__magic_name__ :int = DPMSolverMultistepScheduler.from_config(scheduler.config )
__magic_name__ :Optional[int] = UniPCMultistepScheduler.from_config(scheduler.config )
__magic_name__ :str = self.full_loop(scheduler=__lowerCAmelCase )
__magic_name__ :Any = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1E-3
def A ( self ):
"""simple docstring"""
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
self.check_over_configs(thresholding=__lowerCAmelCase )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__lowerCAmelCase , prediction_type=__lowerCAmelCase , sample_max_value=__lowerCAmelCase , solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , )
def A ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , prediction_type=__lowerCAmelCase , )
__magic_name__ :List[Any] = self.full_loop(
solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , prediction_type=__lowerCAmelCase , )
assert not torch.isnan(__lowerCAmelCase ).any(), "Samples have nan numbers"
def A ( self ):
"""simple docstring"""
self.check_over_configs(lower_order_final=__lowerCAmelCase )
self.check_over_configs(lower_order_final=__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=__lowerCAmelCase , time_step=0 )
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = self.full_loop()
__magic_name__ :Any = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1E-3
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = self.full_loop(prediction_type='''v_prediction''' )
__magic_name__ :Dict = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_mean.item() - 0.1014 ) < 1E-3
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = self.scheduler_classes[0]
__magic_name__ :int = self.get_scheduler_config(thresholding=__lowerCAmelCase , dynamic_thresholding_ratio=0 )
__magic_name__ :List[str] = scheduler_class(**__lowerCAmelCase )
__magic_name__ :Any = 1_0
__magic_name__ :Tuple = self.dummy_model()
__magic_name__ :Optional[Any] = self.dummy_sample_deter.half()
scheduler.set_timesteps(__lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
__magic_name__ :Union[str, Any] = model(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :Tuple = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample
assert sample.dtype == torch.floataa
def A ( self , **__lowerCAmelCase ):
"""simple docstring"""
for scheduler_class in self.scheduler_classes:
__magic_name__ :int = self.get_scheduler_config(**__lowerCAmelCase )
__magic_name__ :str = scheduler_class(**__lowerCAmelCase )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
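# Minimal denoising-loop sketch (illustrative; the zero tensor below is a
# placeholder for a trained noise-prediction network):
def _unipc_demo():
    scheduler = UniPCMultistepScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        residual = torch.zeros_like(sample)  # stand-in for model(sample, t)
        sample = scheduler.step(residual, t, sample).prev_sample
    return sample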
| 180
|
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
    """Compute the resonant frequency of an LC circuit: f = 1 / (2 * pi * sqrt(L * C))."""
if inductance <= 0:
raise ValueError('''Inductance cannot be 0 or negative''' )
elif capacitance <= 0:
raise ValueError('''Capacitance cannot be 0 or negative''' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
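# Worked example (illustrative): a 10 mH inductor with a 5 uF capacitor gives
# f = 1 / (2 * pi * sqrt(L * C)) = 1 / (2 * pi * sqrt(5e-8)) ~= 711.76 Hz.
if __name__ == "__main__":
    print(resonant_frequency(inductance=0.01, capacitance=5e-6))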
| 180
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    'configuration_resnet': ['RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ResNetConfig', 'ResNetOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_resnet'] = [
'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'ResNetForImageClassification',
'ResNetModel',
'ResNetPreTrainedModel',
'ResNetBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_resnet'] = [
'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFResNetForImageClassification',
'TFResNetModel',
'TFResNetPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_resnet'] = [
'FlaxResNetForImageClassification',
'FlaxResNetModel',
'FlaxResNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
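# Usage sketch (illustrative): with the _LazyModule registration above, heavy
# framework submodules are only imported on first attribute access:
#   from transformers.models.resnet import ResNetConfig  # cheap
#   config = ResNetConfig()  # triggers no torch/tf/flax import by itself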
| 215
|
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput(BaseOutput):
    """Output class for Stable Diffusion pipelines."""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.26.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version('>=', '0.0.12')
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
    @flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        """Output class for Flax Stable Diffusion pipelines."""

        images: np.ndarray
        nsfw_content_detected: List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
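# Usage sketch (illustrative): thanks to the guarded imports above, callers can
# import pipeline classes unconditionally; when an optional dependency is
# missing, the dummy-object fallback raises an informative error at use time:
#   from diffusers.pipelines.stable_diffusion import StableDiffusionPipeline
#   pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")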
| 215
| 1
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
_UpperCamelCase : str = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_UpperCamelCase : Any = {
'vocab_file': {
'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt',
},
'tokenizer_file': {
'unc-nlp/lxmert-base-uncased': (
'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'
),
},
}
_UpperCamelCase : Optional[Any] = {
'unc-nlp/lxmert-base-uncased': 512,
}
_UpperCamelCase : str = {
'unc-nlp/lxmert-base-uncased': {'do_lower_case': True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
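# Usage sketch (illustrative; downloads the unc-nlp/lxmert-base-uncased files):
#   tok = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
#   enc = tok("a picture of a cat", "what animal is shown?")
#   # enc["token_type_ids"] marks the first segment with 0s and the second with
#   # 1s, matching create_token_type_ids_from_sequences above.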
| 216
|
'''simple docstring'''
class CircularQueue:
    """Circular FIFO queue with a fixed capacity, backed by a list."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
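# Usage sketch (illustrative): wrap-around behaviour of the fixed-size buffer.
if __name__ == "__main__":
    q = CircularQueue(2)
    q.enqueue("a").enqueue("b")
    assert q.dequeue() == "a"
    q.enqueue("c")  # rear wraps around to index 0
    assert len(q) == 2 and q.first() == "b"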
| 216
| 1
|
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
lowerCAmelCase : List[str] = TypeVar('''T''')
lowerCAmelCase : int = TypeVar('''U''')
class DoubleLinkedListNode(Generic[T, U]):
    """Double Linked List Node built specifically for LRU Cache."""

    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None
def __repr__( self : Optional[int] ) -> str:
return (
f'''Node: key: {self.key}, val: {self.val}, '''
f'''has next: {bool(self.next )}, has prev: {bool(self.prev )}'''
)
class DoubleLinkedList(Generic[T, U]):
    """Double Linked List built specifically for LRU Cache."""

    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """Adds the given node at the end of the list (right before rear)."""
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        """Removes and returns the given node from the list; None if it is detached."""
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class LRUCache(Generic[T, U]):
    """LRU Cache that stores up to a given capacity of key/value pairs."""

    # class variable mapping decorated functions to their cache instances
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self : List[Any] ) -> str:
return (
f'''CacheInfo(hits={self.hits}, misses={self.miss}, '''
f'''capacity={self.capacity}, current size={self.num_keys})'''
)
def __contains__( self : Any , lowerCAmelCase__ : int ) -> bool:
return key in self.cache
    def get(self, key: T) -> U | None:
        """Returns the value for the input key and bumps it to most-recently-used."""
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        """Sets the value for the input key, evicting the oldest entry if full."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        """Decorator version of the LRU Cache; caches on the first argument."""

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
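# Usage sketch (illustrative): memoise a recursive function with the
# class-level decorator above; 100 is an arbitrary cache capacity.
@LRUCache.decorator(100)
def fib(num: int) -> int:
    if num in (1, 2):
        return 1
    return fib(num - 1) + fib(num - 2)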
| 214
|
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __lowercase ( unittest.TestCase ):
__magic_name__ : int = MODEL_FOR_MASKED_LM_MAPPING
__magic_name__ : Dict = TF_MODEL_FOR_MASKED_LM_MAPPING
def lowerCAmelCase_ ( self ) -> Dict:
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def lowerCAmelCase_ ( self ) -> str:
'''simple docstring'''
A_ = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''tf''' )
A_ = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(a__ , decimals=6 ) , [
{'''sequence''': '''My name is grouped''', '''score''': 2.1E-05, '''token''': 3_8_0_1_5, '''token_str''': ''' grouped'''},
{'''sequence''': '''My name is accuser''', '''score''': 2.1E-05, '''token''': 2_5_5_0_6, '''token_str''': ''' accuser'''},
] , )
A_ = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(a__ , decimals=6 ) , [
{
'''sequence''': '''The largest city in France is grouped''',
'''score''': 2.1E-05,
'''token''': 3_8_0_1_5,
'''token_str''': ''' grouped''',
},
{
'''sequence''': '''The largest city in France is accuser''',
'''score''': 2.1E-05,
'''token''': 2_5_5_0_6,
'''token_str''': ''' accuser''',
},
] , )
A_ = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(a__ , decimals=6 ) , [
{'''sequence''': '''My name is Clara''', '''score''': 2E-05, '''token''': 1_3_6_0_6, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Patrick''', '''score''': 2E-05, '''token''': 3_4_9_9, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 1.9E-05, '''token''': 2_9_4_1, '''token_str''': ''' Te'''},
] , )
@require_torch
def lowerCAmelCase_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''pt''' )
A_ = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(a__ , decimals=6 ) , [
{'''sequence''': '''My name is Maul''', '''score''': 2.2E-05, '''token''': 3_5_6_7_6, '''token_str''': ''' Maul'''},
{'''sequence''': '''My name isELS''', '''score''': 2.2E-05, '''token''': 1_6_4_1_6, '''token_str''': '''ELS'''},
] , )
A_ = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(a__ , decimals=6 ) , [
{
'''sequence''': '''The largest city in France is Maul''',
'''score''': 2.2E-05,
'''token''': 3_5_6_7_6,
'''token_str''': ''' Maul''',
},
{'''sequence''': '''The largest city in France isELS''', '''score''': 2.2E-05, '''token''': 1_6_4_1_6, '''token_str''': '''ELS'''},
] , )
A_ = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(a__ , decimals=6 ) , [
{'''sequence''': '''My name is Patrick''', '''score''': 2.1E-05, '''token''': 3_4_9_9, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 2E-05, '''token''': 2_9_4_1, '''token_str''': ''' Te'''},
{'''sequence''': '''My name is Clara''', '''score''': 2E-05, '''token''': 1_3_6_0_6, '''token_str''': ''' Clara'''},
] , )
A_ = unmasker('''My name is <mask> <mask>''' , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=6 ) , [
[
{
'''score''': 2.2E-05,
'''token''': 3_5_6_7_6,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is Maul<mask></s>''',
},
{'''score''': 2.2E-05, '''token''': 1_6_4_1_6, '''token_str''': '''ELS''', '''sequence''': '''<s>My name isELS<mask></s>'''},
],
[
{
'''score''': 2.2E-05,
'''token''': 3_5_6_7_6,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is<mask> Maul</s>''',
},
{'''score''': 2.2E-05, '''token''': 1_6_4_1_6, '''token_str''': '''ELS''', '''sequence''': '''<s>My name is<mask>ELS</s>'''},
],
] , )
@require_torch_gpu
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = pipeline('''fill-mask''' , model='''hf-internal-testing/tiny-random-distilbert''' , device=0 , framework='''pt''' )
# convert model to fp16
pipe.model.half()
A_ = pipe('''Paris is the [MASK] of France.''' )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(a__ , a__ )
@slow
@require_torch
def lowerCAmelCase_ ( self ) -> str:
'''simple docstring'''
A_ = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''pt''' )
self.run_large_test(a__ )
@slow
@require_tf
def lowerCAmelCase_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''tf''' )
self.run_large_test(a__ )
def lowerCAmelCase_ ( self , a__ ) -> Tuple:
'''simple docstring'''
A_ = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(a__ ) , [
{'''sequence''': '''My name is John''', '''score''': 0.0_08, '''token''': 6_1_0, '''token_str''': ''' John'''},
{'''sequence''': '''My name is Chris''', '''score''': 0.0_07, '''token''': 1_5_7_3, '''token_str''': ''' Chris'''},
] , )
A_ = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(a__ ) , [
{
'''sequence''': '''The largest city in France is Paris''',
'''score''': 0.2_51,
'''token''': 2_2_0_1,
'''token_str''': ''' Paris''',
},
{
'''sequence''': '''The largest city in France is Lyon''',
'''score''': 0.2_14,
'''token''': 1_2_7_9_0,
'''token_str''': ''' Lyon''',
},
] , )
A_ = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(a__ ) , [
{'''sequence''': '''My name is Patrick''', '''score''': 0.0_05, '''token''': 3_4_9_9, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Clara''', '''score''': 0.0_00, '''token''': 1_3_6_0_6, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Te''', '''score''': 0.0_00, '''token''': 2_9_4_1, '''token_str''': ''' Te'''},
] , )
@require_torch
def lowerCAmelCase_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''pt''' )
A_ = None
A_ = None
self.run_pipeline_test(a__ , [] )
@require_tf
def lowerCAmelCase_ ( self ) -> List[str]:
'''simple docstring'''
A_ = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''tf''' )
A_ = None
A_ = None
self.run_pipeline_test(a__ , [] )
def lowerCAmelCase_ ( self , a__ , a__ , a__ ) -> Any:
'''simple docstring'''
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest('''The provided tokenizer has no mask token, (probably reformer or wav2vec2)''' )
A_ = FillMaskPipeline(model=a__ , tokenizer=a__ )
A_ = [
F"This is another {tokenizer.mask_token} test",
]
return fill_masker, examples
def lowerCAmelCase_ ( self , a__ , a__ ) -> Any:
'''simple docstring'''
A_ = fill_masker.tokenizer
A_ = fill_masker.model
A_ = fill_masker(
F"This is a {tokenizer.mask_token}" , )
self.assertEqual(
a__ , [
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
] , )
A_ = fill_masker([F"This is a {tokenizer.mask_token}"] )
self.assertEqual(
a__ , [
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
] , )
A_ = fill_masker([F"This is a {tokenizer.mask_token}", F"Another {tokenizer.mask_token} great test."] )
self.assertEqual(
a__ , [
[
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
],
[
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
],
] , )
with self.assertRaises(a__ ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(a__ ):
fill_masker('''This is''' )
self.run_test_top_k(a__ , a__ )
self.run_test_targets(a__ , a__ )
self.run_test_top_k_targets(a__ , a__ )
self.fill_mask_with_duplicate_targets_and_top_k(a__ , a__ )
self.fill_mask_with_multiple_masks(a__ , a__ )
def lowerCAmelCase_ ( self , a__ , a__ ) -> List[Any]:
'''simple docstring'''
A_ = tokenizer.get_vocab()
A_ = sorted(vocab.keys() )[:2]
# Pipeline argument
A_ = FillMaskPipeline(model=a__ , tokenizer=a__ , targets=a__ )
A_ = fill_masker(F"This is a {tokenizer.mask_token}" )
self.assertEqual(
a__ , [
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
] , )
A_ = {vocab[el] for el in targets}
self.assertEqual({el['''token'''] for el in outputs} , a__ )
A_ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['''token_str'''] for el in outputs} , set(a__ ) )
# Call argument
A_ = FillMaskPipeline(model=a__ , tokenizer=a__ )
A_ = fill_masker(F"This is a {tokenizer.mask_token}" , targets=a__ )
self.assertEqual(
a__ , [
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
] , )
A_ = {vocab[el] for el in targets}
self.assertEqual({el['''token'''] for el in outputs} , a__ )
A_ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['''token_str'''] for el in outputs} , set(a__ ) )
# Score equivalence
A_ = fill_masker(F"This is a {tokenizer.mask_token}" , targets=a__ )
A_ = [top_mask['''token_str'''] for top_mask in outputs]
A_ = [top_mask['''score'''] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(a__ ) == set(a__ ):
A_ = fill_masker(F"This is a {tokenizer.mask_token}" , targets=a__ )
A_ = [top_mask['''score'''] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(a__ ) , nested_simplify(a__ ) )
# Raises with invalid
with self.assertRaises(a__ ):
A_ = fill_masker(F"This is a {tokenizer.mask_token}" , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(a__ ):
A_ = fill_masker(F"This is a {tokenizer.mask_token}" , targets=[''''''] )
with self.assertRaises(a__ ):
A_ = fill_masker(F"This is a {tokenizer.mask_token}" , targets='''''' )
def lowerCAmelCase_ ( self , a__ , a__ ) -> int:
'''simple docstring'''
A_ = FillMaskPipeline(model=a__ , tokenizer=a__ , top_k=2 )
A_ = fill_masker(F"This is a {tokenizer.mask_token}" )
self.assertEqual(
a__ , [
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
] , )
A_ = FillMaskPipeline(model=a__ , tokenizer=a__ )
A_ = fill_masker(F"This is a {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
a__ , [
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
] , )
self.assertEqual(nested_simplify(a__ ) , nested_simplify(a__ ) )
def lowerCAmelCase_ ( self , a__ , a__ ) -> str:
'''simple docstring'''
A_ = tokenizer.get_vocab()
A_ = FillMaskPipeline(model=a__ , tokenizer=a__ )
# top_k=2, ntargets=3
A_ = sorted(vocab.keys() )[:3]
A_ = fill_masker(F"This is a {tokenizer.mask_token}" , top_k=2 , targets=a__ )
# If we use the most probably targets, and filter differently, we should still
# have the same results
A_ = [el['''token_str'''] for el in sorted(a__ , key=lambda a__ : x["score"] , reverse=a__ )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(a__ ).issubset(a__ ):
A_ = fill_masker(F"This is a {tokenizer.mask_token}" , top_k=3 , targets=a__ )
# They should yield exactly the same result
self.assertEqual(nested_simplify(a__ ) , nested_simplify(a__ ) )
def lowerCAmelCase_ ( self , a__ , a__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = FillMaskPipeline(model=a__ , tokenizer=a__ )
A_ = tokenizer.get_vocab()
# String duplicates + id duplicates
A_ = sorted(vocab.keys() )[:3]
A_ = [targets[0], targets[1], targets[0], targets[2], targets[1]]
A_ = fill_masker(F"My name is {tokenizer.mask_token}" , targets=a__ , top_k=1_0 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(a__ ) , 3 )
def lowerCAmelCase_ ( self , a__ , a__ ) -> List[Any]:
'''simple docstring'''
A_ = FillMaskPipeline(model=a__ , tokenizer=a__ )
A_ = fill_masker(
F"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
a__ , [
[
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
],
[
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
],
[
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
{'''sequence''': ANY(a__ ), '''score''': ANY(a__ ), '''token''': ANY(a__ ), '''token_str''': ANY(a__ )},
],
] , )
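# Quick usage sketch (illustrative; mirrors the tiny checkpoints exercised above):
#   unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)
#   unmasker("My name is <mask>")
#   # -> a list of {"sequence", "score", "token", "token_str"} dicts, best first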
| 141
| 0
|
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class __a ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
        act = get_activation('swish')
self.assertIsInstance(lowerCAmelCase__ , nn.SiLU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
        act = get_activation('silu')
self.assertIsInstance(lowerCAmelCase__ , nn.SiLU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
        act = get_activation('mish')
self.assertIsInstance(lowerCAmelCase__ , nn.Mish )
self.assertEqual(act(torch.tensor(-200 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
        act = get_activation('gelu')
self.assertIsInstance(lowerCAmelCase__ , nn.GELU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
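# Usage sketch (illustrative): get_activation maps a string key to an nn.Module,
# so model configs can pick nonlinearities by name:
#   act = get_activation("gelu")
#   y = act(torch.linspace(-2.0, 2.0, steps=5))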
| 335
|
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
super().test_save_load_local(expected_max_difference=3E-3 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)
        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
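# Usage sketch (illustrative; mirrors the integration test above, weights are
# downloaded from the Hub):
#   unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
#   pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler()).to("cuda")
#   image = pipe(generator=torch.manual_seed(0), eta=0.0, output_type="numpy").images[0]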
| 335
| 1
|
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask


def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect, min_coords, max_coords):
    return (
        clamp(rect[0], min_coords[0], max_coords[0]),
        clamp(rect[1], min_coords[1], max_coords[1]),
        clamp(rect[2], min_coords[0], max_coords[0]),
        clamp(rect[3], min_coords[1], max_coords[1]),
    )


def add_overlap_rect(rect, overlap, image_size):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    divisor = n % d
    return n - divisor
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    """Tile-based upscaling pipeline built on top of StableDiffusionUpscalePipeline."""

    def __init__(self, vae, text_encoder, tokenizer, unet, low_res_scheduler, scheduler, max_noise_level=350):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )
def __UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , **lowercase__ ):
'''simple docstring'''
torch.manual_seed(0 )
__A =(
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
__A =add_overlap_rect(lowercase__ , lowercase__ , image.size )
__A =image.crop(lowercase__ )
__A =((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
__A =translated_slice_x - (original_image_slice / 2)
__A =max(0 , lowercase__ )
__A =squeeze_tile(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
__A =to_input.size
__A =to_input.resize((tile_size, tile_size) , Image.BICUBIC )
__A =super(lowercase__ , self ).__call__(image=lowercase__ , **lowercase__ ).images[0]
__A =upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
__A =unsqueeze_tile(lowercase__ , lowercase__ )
__A =upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
__A =[]
if x == 0:
remove_borders.append('''l''' )
elif crop_rect[2] == image.size[0]:
remove_borders.append('''r''' )
if y == 0:
remove_borders.append('''t''' )
elif crop_rect[3] == image.size[1]:
remove_borders.append('''b''' )
__A =Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=lowercase__ ) , mode='''L''' , )
final_image.paste(
lowercase__ , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , lowercase__ )
    @torch.no_grad()
    def __call__(self, prompt, image, num_inference_steps=75, guidance_scale=9.0, noise_level=50, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, callback=None, callback_steps=1, tile_size=128, tile_border=32, original_image_slice=32, ):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice, x, y, tile_size, tile_border, image, final_image, prompt=prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, noise_level=noise_level, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
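# A minimal check of the tiling math above: with tile_size=128, a 300x200 input
# gives math.ceil(300 / 128) * math.ceil(200 / 128) == 3 * 2 == 6 tiles in total.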
def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")
    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")
    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")
if __name__ == "__main__":
    main()
| 184
|
def longest_common_substring(text1: str, text2: str) -> str:
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")
    text1_length = len(text1)
    text2_length = len(text2)
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0
    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
    return text1[ans_index - ans_length : ans_index]
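# A minimal usage sketch (hypothetical inputs, not from the original tests):
#     longest_common_substring("abcdef", "xbcdy")  # -> 'bcd'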
if __name__ == "__main__":
import doctest
doctest.testmod()
| 184
| 1
|
def or_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(1) != 0)
def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 52
|
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : int = LongformerTokenizer
A__ : Optional[int] = True
A__ : Any = LongformerTokenizerFast
A__ : Dict = True
def _a ( self : int ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A__ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
A__ = dict(zip(_snake_case , range(len(_snake_case ) ) ) )
A__ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
A__ = {'unk_token': '<unk>'}
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_snake_case ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_snake_case ) )
def _a ( self : int , **_snake_case : Union[str, Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_snake_case )
def _a ( self : Optional[int] , **_snake_case : List[Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_snake_case )
def _a ( self : Any , _snake_case : Optional[Any] ):
"""simple docstring"""
A__ = 'lower newer'
A__ = 'lower newer'
return input_text, output_text
def _a ( self : Any ):
"""simple docstring"""
A__ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
A__ = 'lower newer'
A__ = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
A__ = tokenizer.tokenize(_snake_case ) # , add_prefix_space=True)
self.assertListEqual(_snake_case , _snake_case )
A__ = tokens + [tokenizer.unk_token]
A__ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , _snake_case )
def _a ( self : List[str] ):
"""simple docstring"""
A__ = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=_snake_case ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=_snake_case ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
A__ = tokenizer.encode('sequence builders' , add_special_tokens=_snake_case )
A__ = tokenizer.encode('multi-sequence build' , add_special_tokens=_snake_case )
A__ = tokenizer.encode(
'sequence builders' , add_special_tokens=_snake_case , add_prefix_space=_snake_case )
A__ = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=_snake_case , add_prefix_space=_snake_case )
A__ = tokenizer.build_inputs_with_special_tokens(_snake_case )
A__ = tokenizer.build_inputs_with_special_tokens(_snake_case , _snake_case )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _a ( self : List[str] ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = 'Encode this sequence.'
A__ = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
A__ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case , add_prefix_space=_snake_case )
A__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(_snake_case , _snake_case )
A__ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case , add_prefix_space=_snake_case )
A__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(_snake_case , _snake_case )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
A__ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
A__ = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(_snake_case , _snake_case )
# Testing spaces after special tokens
A__ = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case )} ) # mask token has a left space
A__ = tokenizer.convert_tokens_to_ids(_snake_case )
A__ = 'Encode <mask> sequence'
A__ = 'Encode <mask>sequence'
A__ = tokenizer.encode(_snake_case )
A__ = encoded.index(_snake_case )
A__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(_snake_case , _snake_case )
A__ = tokenizer.encode(_snake_case )
A__ = encoded.index(_snake_case )
A__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(_snake_case , _snake_case )
def _a ( self : Dict ):
"""simple docstring"""
pass
def _a ( self : Union[str, Any] ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A__ = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case )
A__ = self.tokenizer_class.from_pretrained(_snake_case , **_snake_case )
A__ = 'A, <mask> AllenNLP sentence.'
A__ = tokenizer_r.encode_plus(_snake_case , add_special_tokens=_snake_case , return_token_type_ids=_snake_case )
A__ = tokenizer_p.encode_plus(_snake_case , add_special_tokens=_snake_case , return_token_type_ids=_snake_case )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
A__ = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
A__ = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
            # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
_snake_case , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
_snake_case , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def _a ( self : List[Any] ):
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
A__ = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
A__ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , _snake_case )
self.assertEqual(post_processor_state['add_prefix_space'] , _snake_case )
self.assertEqual(post_processor_state['trim_offsets'] , _snake_case )
def _a ( self : Optional[Any] ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A__ = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
A__ = F'''{text_of_1_token} {text_of_1_token}'''
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_snake_case ) + 1, len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_snake_case ) + 1, len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_snake_case ), len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_snake_case ), len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_snake_case ) + 1, 1 + len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_snake_case ), 1 + len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_snake_case ), 1 + len(_snake_case ) + 1 + len(_snake_case )) , )
| 52
| 1
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
snake_case_ : List[str] = logging.get_logger(__name__)
snake_case_ : Tuple = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
snake_case_ : Any = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def lowercase_ ( _lowercase : Dict , _lowercase : Union[str, Any] , _lowercase : List[Any] , _lowercase : Optional[int] , _lowercase : Tuple ):
'''simple docstring'''
for attribute in key.split("." ):
UpperCAmelCase : List[Any] = getattr(_lowercase , _lowercase )
if weight_type is not None:
UpperCAmelCase : Optional[int] = getattr(_lowercase , _lowercase ).shape
else:
UpperCAmelCase : List[Any] = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
UpperCAmelCase : Tuple = value
elif weight_type == "weight_g":
UpperCAmelCase : Optional[Any] = value
elif weight_type == "weight_v":
UpperCAmelCase : int = value
elif weight_type == "bias":
UpperCAmelCase : str = value
else:
UpperCAmelCase : Any = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def lowercase_ ( _lowercase : List[str] , _lowercase : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase : Any = []
UpperCAmelCase : Dict = fairseq_model.state_dict()
UpperCAmelCase : int = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
UpperCAmelCase : Optional[Any] = None
for name, value in fairseq_dict.items():
UpperCAmelCase : str = False
if "conv_layers" in name:
load_conv_layer(
_lowercase , _lowercase , _lowercase , _lowercase , hf_model.config.feat_extract_norm == "group" , )
UpperCAmelCase : Optional[int] = True
elif name.split("." )[0] == "proj":
UpperCAmelCase : List[Any] = fairseq_model.proj
UpperCAmelCase : Dict = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
UpperCAmelCase : str = True
if "*" in mapped_key:
UpperCAmelCase : Optional[int] = name.split(_lowercase )[0].split("." )[-2]
UpperCAmelCase : str = mapped_key.replace("*" , _lowercase )
if "weight_g" in name:
UpperCAmelCase : Any = "weight_g"
elif "weight_v" in name:
UpperCAmelCase : int = "weight_v"
elif "bias" in name:
UpperCAmelCase : Optional[int] = "bias"
elif "weight" in name:
UpperCAmelCase : Dict = "weight"
else:
UpperCAmelCase : Optional[Any] = None
set_recursively(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
continue
if not is_used:
unused_weights.append(_lowercase )
logger.warning(F"""Unused weights: {unused_weights}""" )
return proj_weight
def lowercase_ ( _lowercase : Dict , _lowercase : str , _lowercase : int , _lowercase : str , _lowercase : Tuple ):
'''simple docstring'''
UpperCAmelCase : str = full_name.split("conv_layers." )[-1]
UpperCAmelCase : Optional[int] = name.split("." )
UpperCAmelCase : Optional[Any] = int(items[0] )
UpperCAmelCase : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
UpperCAmelCase : Optional[Any] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
UpperCAmelCase : List[str] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
UpperCAmelCase : str = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
UpperCAmelCase : str = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowercase )
def make_linear_from_emb(emb):
    '''simple docstring'''
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
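# A minimal sketch of the helper above (assumed shapes): an embedding with
# weight of shape (vocab_size, emb_dim) becomes a bias-free linear layer whose
# weight is the embedding matrix, i.e. the usual weight-tied LM head mapping
# emb_dim -> vocab_size. For nn.Embedding(10, 4) the resulting layer ends up
# with weight.shape == (10, 4).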
def create_vocab_dict(dict_path):
    '''simple docstring'''
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }
    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
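# A minimal sketch of the dict file parsed above (assumed fairseq format):
# one "<token> <count>" pair per line, e.g.
#     der 1234567
#     und 987654
# line.split(" ")[0] keeps the token, and ids start at 4 after the special tokens.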
@torch.no_grad()
def lowercase_ ( _lowercase : Optional[Any] , _lowercase : List[Any] , _lowercase : Optional[Any] , _lowercase : str , _lowercase : int , _lowercase : Any , _lowercase : Optional[Any] , ):
'''simple docstring'''
UpperCAmelCase : Dict = WavaVecaConfig.from_pretrained(_lowercase )
UpperCAmelCase : List[str] = SpeechaTextaConfig.from_pretrained(
_lowercase , vocab_size=_lowercase , decoder_layers=_lowercase , do_stable_layer_norm=_lowercase )
UpperCAmelCase : Tuple = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=_lowercase , return_attention_mask=_lowercase , )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
UpperCAmelCase : Dict = model[0].eval()
# set weights for wav2vec2 encoder
UpperCAmelCase : Optional[int] = WavaVecaModel(_lowercase )
UpperCAmelCase : Optional[int] = recursively_load_weights_wavaveca(model.encoder , _lowercase )
UpperCAmelCase : List[str] = SpeechaTextaForCausalLM(_lowercase )
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_lowercase )
# set output linear layer
unexpected_keys.remove("embed_out" )
UpperCAmelCase : Any = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
UpperCAmelCase : Dict = SpeechEncoderDecoderModel(encoder=_lowercase , decoder=_lowercase )
UpperCAmelCase : List[str] = False
# add projection layer
UpperCAmelCase : List[str] = nn.Parameter(projection_layer.weight )
UpperCAmelCase : Any = nn.Parameter(projection_layer.bias )
UpperCAmelCase : str = create_vocab_dict(_lowercase )
with open(os.path.join(_lowercase , "vocab.json" ) , "w" ) as fp:
json.dump(_lowercase , _lowercase )
UpperCAmelCase : List[Any] = SpeechaTextaTokenizer(os.path.join(_lowercase , "vocab.json" ) )
tokenizer.save_pretrained(_lowercase )
UpperCAmelCase : Union[str, Any] = hf_wavavec.config.to_dict()
UpperCAmelCase : List[Any] = tokenizer.pad_token_id
UpperCAmelCase : Optional[int] = tokenizer.bos_token_id
UpperCAmelCase : Optional[int] = tokenizer.eos_token_id
UpperCAmelCase : Any = "speech_to_text_2"
UpperCAmelCase : Optional[int] = "wav2vec2"
UpperCAmelCase : Any = SpeechEncoderDecoderConfig.from_dict(_lowercase )
hf_wavavec.save_pretrained(_lowercase )
feature_extractor.save_pretrained(_lowercase )
if __name__ == "__main__":
snake_case_ : Any = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-large-lv60""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/s2t-small-mustc-en-fr-st""",
type=str,
help="""Path to hf decoder s2t checkpoint config""",
)
parser.add_argument("""--vocab_size""", default=1_0_2_2_4, type=int, help="""Vocab size of decoder""")
parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""")
snake_case_ : int = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 595
|
import torch
from diffusers import DiffusionPipeline
class _A ( DiffusionPipeline ):
    def __init__(self, unet, scheduler):
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
    def __call__(self):
        '''simple docstring'''
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), )
        timestep = 1
        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample
        # The subtraction cancels the scheduler output, so the pipeline returns a
        # deterministic tensor of ones while still exercising unet and scheduler.
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
| 415
| 0
|
"""simple docstring"""
import datasets
from .evaluate import evaluate
lowerCamelCase__ : List[Any] = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n"
lowerCamelCase__ : int = "\nThis metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n"
lowerCamelCase__ : Tuple = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__( datasets.Metric ):
'''simple docstring'''
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {'''id''': datasets.Value('''string''' ), '''prediction_text''': datasets.Value('''string''' )},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , reference_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , )
    def _compute(self, predictions, references):
        '''simple docstring'''
        pred_dict = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
        dataset = [
            {
                '''paragraphs''': [
                    {
                        '''qas''': [
                            {
                                '''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
                                '''id''': ref['''id'''],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
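# A minimal sketch of the reshaping in _compute above: the flat reference pairs
# are wrapped back into the nested SQuAD v1 dataset layout
# ({"paragraphs": [{"qas": [...]}]}) that the official evaluate() script expects.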
| 719
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ : Tuple = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Union[str, Any] = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
lowerCamelCase__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 18
| 0
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A : List[Any] = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
    """simple docstring"""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head"):
            key = "segformer.encoder." + key
        if key.startswith("backbone"):
            key = key.replace("backbone", "segformer.encoder")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if key.startswith("head"):
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
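# A minimal walkthrough of the renaming above (hypothetical key, encoder_only=False):
#     "backbone.block1.0.attn.q.weight"
#  -> "segformer.encoder.block1.0.attn.q.weight"                  (backbone -> segformer.encoder)
#  -> "segformer.encoder.block.0.0.attn.q.weight"                 (block1 -> block.0)
#  -> "segformer.encoder.block.0.0.attention.self.query.weight"   (attn.q -> attention.self.query)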
def read_in_k_v(state_dict, config):
    """simple docstring"""
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]
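# A minimal shape sketch of the split above (assumed sizes): the original
# checkpoint stores keys and values as one (2 * hidden, hidden) matrix; rows
# [:hidden] become the key projection and rows [hidden:] the value projection,
# e.g. hidden = 64 turns a (128, 64) kv_weight into two (64, 64) weights.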
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def UpperCamelCase ( __magic_name__ : List[Any] , __magic_name__ : Dict , __magic_name__ : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = SegformerConfig()
lowercase__ = False
# set attributes based on model_name
lowercase__ = """huggingface/label-files"""
if "segformer" in model_name:
lowercase__ = model_name[len("""segformer.""" ) : len("""segformer.""" ) + 2]
if "ade" in model_name:
lowercase__ = 150
lowercase__ = """ade20k-id2label.json"""
lowercase__ = (1, 150, 128, 128)
elif "city" in model_name:
lowercase__ = 19
lowercase__ = """cityscapes-id2label.json"""
lowercase__ = (1, 19, 128, 128)
else:
raise ValueError(f'''Model {model_name} not supported''' )
elif "mit" in model_name:
lowercase__ = True
lowercase__ = model_name[4:6]
lowercase__ = 1000
lowercase__ = """imagenet-1k-id2label.json"""
lowercase__ = (1, 1000)
else:
raise ValueError(f'''Model {model_name} not supported''' )
# set config attributes
lowercase__ = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type="""dataset""" ) , """r""" ) )
lowercase__ = {int(__magic_name__ ): v for k, v in idalabel.items()}
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
lowercase__ = [64, 128, 320, 512]
lowercase__ = 256
elif size == "b2":
lowercase__ = [64, 128, 320, 512]
lowercase__ = 768
lowercase__ = [3, 4, 6, 3]
elif size == "b3":
lowercase__ = [64, 128, 320, 512]
lowercase__ = 768
lowercase__ = [3, 4, 18, 3]
elif size == "b4":
lowercase__ = [64, 128, 320, 512]
lowercase__ = 768
lowercase__ = [3, 8, 27, 3]
elif size == "b5":
lowercase__ = [64, 128, 320, 512]
lowercase__ = 768
lowercase__ = [3, 6, 40, 3]
else:
raise ValueError(f'''Size {size} not supported''' )
# load image processor (only resize + normalize)
lowercase__ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=__magic_name__ , align=__magic_name__ , do_random_crop=__magic_name__ )
# prepare image
lowercase__ = prepare_img()
lowercase__ = image_processor(images=__magic_name__ , return_tensors="""pt""" ).pixel_values
logger.info(f'''Converting model {model_name}...''' )
# load original state dict
if encoder_only:
lowercase__ = torch.load(__magic_name__ , map_location=torch.device("""cpu""" ) )
else:
lowercase__ = torch.load(__magic_name__ , map_location=torch.device("""cpu""" ) )["""state_dict"""]
# rename keys
lowercase__ = rename_keys(__magic_name__ , encoder_only=__magic_name__ )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(__magic_name__ , __magic_name__ )
# create HuggingFace model and load state dict
if encoder_only:
lowercase__ = False
lowercase__ = SegformerForImageClassification(__magic_name__ )
else:
lowercase__ = SegformerForSemanticSegmentation(__magic_name__ )
model.load_state_dict(__magic_name__ )
model.eval()
# forward pass
lowercase__ = model(__magic_name__ )
lowercase__ = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
lowercase__ = torch.tensor(
[
[[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
[[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]],
[[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
lowercase__ = torch.tensor(
[
[[-7.5_8_2_0, -8.7_2_3_1, -8.3_2_1_5], [-8.0_6_0_0, -1_0.3_5_2_9, -1_0.0_3_0_4], [-7.5_2_0_8, -9.4_1_0_3, -9.6_2_3_9]],
[[-1_2.6_9_1_8, -1_3.8_9_9_4, -1_3.7_1_3_7], [-1_3.3_1_9_6, -1_5.7_5_2_3, -1_5.4_7_8_9], [-1_2.9_3_4_3, -1_4.8_7_5_7, -1_4.9_6_8_9]],
[[-1_1.1_9_1_1, -1_1.9_4_2_1, -1_1.3_2_4_3], [-1_1.3_3_4_2, -1_3.6_8_3_9, -1_3.3_5_8_1], [-1_0.3_9_0_9, -1_2.1_8_3_2, -1_2.4_8_5_8]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
lowercase__ = torch.tensor(
[
[[-1_1.8_1_7_3, -1_4.3_8_5_0, -1_6.3_1_2_8], [-1_4.5_6_4_8, -1_6.5_8_0_4, -1_8.6_5_6_8], [-1_4.7_2_2_3, -1_5.7_3_8_7, -1_8.4_2_1_8]],
[[-1_5.7_2_9_0, -1_7.9_1_7_1, -1_9.4_4_2_3], [-1_8.3_1_0_5, -1_9.9_4_4_8, -2_1.4_6_6_1], [-1_7.9_2_9_6, -1_8.6_4_9_7, -2_0.7_9_1_0]],
[[-1_5.0_7_8_3, -1_7.0_3_3_6, -1_8.2_7_8_9], [-1_6.8_7_7_1, -1_8.6_8_7_0, -2_0.1_6_1_2], [-1_6.2_4_5_4, -1_7.1_4_2_6, -1_9.5_0_5_5]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
lowercase__ = torch.tensor(
[
[[-9.0_8_7_8, -1_0.2_0_8_1, -1_0.1_8_9_1], [-9.3_1_4_4, -1_0.7_9_4_1, -1_0.9_8_4_3], [-9.2_2_9_4, -1_0.3_8_5_5, -1_0.5_7_0_4]],
[[-1_2.2_3_1_6, -1_3.9_0_6_8, -1_3.6_1_0_2], [-1_2.9_1_6_1, -1_4.3_7_0_2, -1_4.3_2_3_5], [-1_2.5_2_3_3, -1_3.7_1_7_4, -1_3.7_9_3_2]],
[[-1_4.6_2_7_5, -1_5.2_4_9_0, -1_4.9_7_2_7], [-1_4.3_4_0_0, -1_5.9_6_8_7, -1_6.2_8_2_7], [-1_4.1_4_8_4, -1_5.4_0_3_3, -1_5.8_9_3_7]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
lowercase__ = torch.tensor(
[
[[-1_2.3_1_4_4, -1_3.2_4_4_7, -1_4.0_8_0_2], [-1_3.3_6_1_4, -1_4.5_8_1_6, -1_5.6_1_1_7], [-1_3.3_3_4_0, -1_4.4_4_3_3, -1_6.2_2_1_9]],
[[-1_9.2_7_8_1, -2_0.4_1_2_8, -2_0.7_5_0_6], [-2_0.6_1_5_3, -2_1.6_5_6_6, -2_2.0_9_9_8], [-1_9.9_8_0_0, -2_1.0_4_3_0, -2_2.1_4_9_4]],
[[-1_8.8_7_3_9, -1_9.7_8_0_4, -2_1.1_8_3_4], [-2_0.1_2_3_3, -2_1.6_7_6_5, -2_3.2_9_4_4], [-2_0.0_3_1_5, -2_1.2_6_4_1, -2_3.6_9_4_4]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
lowercase__ = torch.tensor(
[
[[-9.5_5_2_4, -1_2.0_8_3_5, -1_1.7_3_4_8], [-1_0.5_2_2_9, -1_3.6_4_4_6, -1_4.5_6_6_2], [-9.5_8_4_2, -1_2.8_8_5_1, -1_3.9_4_1_4]],
[[-1_5.3_4_3_2, -1_7.5_3_2_3, -1_7.0_8_1_8], [-1_6.3_3_3_0, -1_8.9_2_5_5, -1_9.2_1_0_1], [-1_5.1_3_4_0, -1_7.7_8_4_8, -1_8.3_9_7_1]],
[[-1_2.6_0_7_2, -1_4.9_4_8_6, -1_4.6_6_3_1], [-1_3.7_6_2_9, -1_7.0_9_0_7, -1_7.7_7_4_5], [-1_2.7_8_9_9, -1_6.1_6_9_5, -1_7.1_6_7_1]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
lowercase__ = torch.tensor(
[
[[-1_1.9_2_9_5, -1_3.4_0_5_7, -1_4.8_1_0_6], [-1_3.3_4_3_1, -1_4.8_1_7_9, -1_5.3_7_8_1], [-1_4.2_8_3_6, -1_5.5_9_4_2, -1_6.1_5_8_8]],
[[-1_1.4_9_0_6, -1_2.8_0_6_7, -1_3.6_5_6_4], [-1_3.1_1_8_9, -1_4.0_5_0_0, -1_4.1_5_4_3], [-1_3.8_7_4_8, -1_4.5_1_3_6, -1_4.8_7_8_9]],
[[0.5_3_7_4, 0.1_0_6_7, -0.4_7_4_2], [0.1_1_4_1, -0.2_2_5_5, -0.7_0_9_9], [-0.3_0_0_0, -0.5_9_2_4, -1.3_1_0_5]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
lowercase__ = torch.tensor(
[
[[-7.8_2_1_7, -9.8_7_6_7, -1_0.1_7_1_7], [-9.4_4_3_8, -1_0.9_0_5_8, -1_1.4_0_4_7], [-9.7_9_3_9, -1_2.3_4_9_5, -1_2.1_0_7_9]],
[[-7.1_5_1_4, -9.5_3_3_6, -1_0.0_8_6_0], [-9.7_7_7_6, -1_1.6_8_2_2, -1_1.8_4_3_9], [-1_0.1_4_1_1, -1_2.7_6_5_5, -1_2.8_9_7_2]],
[[0.3_0_2_1, 0.0_8_0_5, -0.2_3_1_0], [-0.0_3_2_8, -0.1_6_0_5, -0.2_7_1_4], [-0.1_4_0_8, -0.5_4_7_7, -0.6_9_7_6]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
lowercase__ = torch.tensor(
[
[
[-1.1_3_7_2E0_1, -1.2_7_8_7E0_1, -1.3_4_7_7E0_1],
[-1.2_5_3_6E0_1, -1.4_1_9_4E0_1, -1.4_4_0_9E0_1],
[-1.3_2_1_7E0_1, -1.4_8_8_8E0_1, -1.5_3_2_7E0_1],
],
[
[-1.4_7_9_1E0_1, -1.7_1_2_2E0_1, -1.8_2_7_7E0_1],
[-1.7_1_6_3E0_1, -1.9_1_9_2E0_1, -1.9_5_3_3E0_1],
[-1.7_8_9_7E0_1, -1.9_9_9_1E0_1, -2.0_3_1_5E0_1],
],
[
[7.6_7_2_3E-0_1, 4.1_9_2_1E-0_1, -7.7_8_7_8E-0_2],
[4.7_7_7_2E-0_1, 9.5_5_5_7E-0_3, -2.8_0_8_2E-0_1],
[3.6_0_3_2E-0_1, -2.4_8_2_6E-0_1, -5.1_1_6_8E-0_1],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
lowercase__ = torch.tensor(
[
[[-9.4_9_5_9, -1_1.3_0_8_7, -1_1.7_4_7_9], [-1_1.0_0_2_5, -1_2.6_5_4_0, -1_2.3_3_1_9], [-1_1.4_0_6_4, -1_3.0_4_8_7, -1_2.9_9_0_5]],
[[-9.8_9_0_5, -1_1.3_0_8_4, -1_2.0_8_5_4], [-1_1.1_7_2_6, -1_2.7_6_9_8, -1_2.9_5_8_3], [-1_1.5_9_8_5, -1_3.3_2_7_8, -1_4.1_7_7_4]],
[[0.2_2_1_3, 0.0_1_9_2, -0.2_4_6_6], [-0.1_7_3_1, -0.4_2_1_3, -0.4_8_7_4], [-0.3_1_2_6, -0.6_5_4_1, -1.1_3_8_9]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
lowercase__ = torch.tensor(
[
[[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]],
[[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]],
[[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
lowercase__ = torch.tensor(
[
[[-1_6.0_9_7_6, -1_6.4_8_5_6, -1_7.3_9_6_2], [-1_6.6_2_3_4, -1_9.0_3_4_2, -1_9.7_6_8_5], [-1_6.0_9_0_0, -1_8.0_6_6_1, -1_9.1_1_8_0]],
[[-1_8.4_7_5_0, -1_8.8_4_8_8, -1_9.5_0_7_4], [-1_9.4_0_3_0, -2_2.1_5_7_0, -2_2.5_9_7_7], [-1_9.1_1_9_1, -2_0.8_4_8_6, -2_2.3_7_8_3]],
[[-4.5_1_7_8, -5.5_0_3_7, -6.5_1_0_9], [-5.0_8_8_4, -7.2_1_7_4, -8.0_3_3_4], [-4.4_1_5_6, -5.8_1_1_7, -7.2_9_7_0]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
lowercase__ = torch.tensor(
[
[[-1_4.2_0_8_1, -1_4.4_7_3_2, -1_4.1_9_7_7], [-1_4.5_8_6_7, -1_6.4_4_2_3, -1_6.6_3_5_6], [-1_3.4_4_4_1, -1_4.9_6_8_5, -1_6.8_6_9_6]],
[[-1_4.4_5_7_6, -1_4.7_0_7_3, -1_5.0_4_5_1], [-1_5.0_8_1_6, -1_7.6_2_3_7, -1_7.9_8_7_3], [-1_4.4_2_1_3, -1_6.0_1_9_9, -1_8.5_9_9_2]],
[[-4.7_3_4_9, -4.9_5_8_8, -5.0_9_6_6], [-4.3_2_1_0, -6.9_3_2_5, -7.2_5_9_1], [-3.4_3_1_2, -4.7_4_8_4, -7.1_9_1_7]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
lowercase__ = torch.tensor(
[
[[-1_1.7_7_3_7, -1_1.9_5_2_6, -1_1.3_2_7_3], [-1_3.6_6_9_2, -1_4.4_5_7_4, -1_3.8_8_7_8], [-1_3.8_9_3_7, -1_4.6_9_2_4, -1_5.9_3_4_5]],
[[-1_4.6_7_0_6, -1_4.5_3_3_0, -1_4.1_3_0_6], [-1_6.1_5_0_2, -1_6.8_1_8_0, -1_6.4_2_6_9], [-1_6.8_3_3_8, -1_7.8_9_3_9, -2_0.1_7_4_6]],
[[1.0_4_9_1, 0.8_2_8_9, 1.0_3_1_0], [1.1_0_4_4, 0.5_2_1_9, 0.8_0_5_5], [1.0_8_9_9, 0.6_9_2_6, 0.5_5_9_0]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
lowercase__ = torch.tensor(
[
[[-1_2.5_6_4_1, -1_3.4_7_7_7, -1_3.0_6_8_4], [-1_3.9_5_8_7, -1_5.8_9_8_3, -1_6.6_5_5_7], [-1_3.3_1_0_9, -1_5.7_3_5_0, -1_6.3_1_4_1]],
[[-1_4.7_0_7_4, -1_5.4_3_5_2, -1_4.5_9_4_4], [-1_6.6_3_5_3, -1_8.1_6_6_3, -1_8.6_1_2_0], [-1_5.1_7_0_2, -1_8.0_3_2_9, -1_8.1_5_4_7]],
[[-1.7_9_9_0, -2.0_9_5_1, -1.7_7_8_4], [-2.6_3_9_7, -3.8_2_4_5, -3.9_6_8_6], [-1.5_2_6_4, -2.8_1_2_6, -2.9_3_1_6]],
] )
else:
lowercase__ = logits.argmax(-1 ).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , __magic_name__ , atol=1E-2 )
# finally, save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
model.save_pretrained(__magic_name__ )
image_processor.save_pretrained(__magic_name__ )
if __name__ == "__main__":
A : List[str] = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='segformer.b0.512x512.ade.160k',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
A : int = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 15
|
"""simple docstring"""
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class snake_case__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = '''M-CLIP'''
def __init__( self : Dict , lowercase : Any=10_24 , lowercase : Optional[Any]=7_68 , **lowercase : Dict ):
'''simple docstring'''
UpperCAmelCase : Any = transformerDimSize
UpperCAmelCase : Optional[int] = imageDimSize
super().__init__(**lowercase )
class snake_case__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = MCLIPConfig
def __init__( self : Optional[Any] , lowercase : List[Any] , *lowercase : Tuple , **lowercase : List[str] ):
'''simple docstring'''
super().__init__(lowercase , *lowercase , **lowercase )
UpperCAmelCase : Union[str, Any] = XLMRobertaModel(lowercase )
UpperCAmelCase : List[Any] = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims )
def __lowerCAmelCase ( self : Dict , lowercase : List[Any] , lowercase : Dict ):
'''simple docstring'''
UpperCAmelCase : Tuple = self.transformer(input_ids=lowercase , attention_mask=lowercase )[0]
UpperCAmelCase : Dict = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(lowercase ), embs
| 595
| 0
|
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class UpperCAmelCase_ (unittest.TestCase , ToolTesterMixin ):
    """simple docstring"""
    def setUp(self):
        self.tool = load_tool('text-to-speech')
        self.tool.setup()
    def test_exact_match_arg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool('hey')
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3], torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]), ) )
    def test_exact_match_kwarg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool(text='hey')
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3], torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]), ) )
| 713
|
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a = {
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
a = logging.get_logger(__name__)
class UpperCAmelCase_ (snake_case__ ):
"""simple docstring"""
lowerCamelCase : Tuple = 'mask2former'
lowerCamelCase : Union[str, Any] = ['swin']
lowerCamelCase : Union[str, Any] = {'hidden_size': 'hidden_dim'}
def __init__( self: Dict , _UpperCAmelCase: Optional[Dict] = None , _UpperCAmelCase: int = 256 , _UpperCAmelCase: int = 256 , _UpperCAmelCase: int = 256 , _UpperCAmelCase: int = 1024 , _UpperCAmelCase: str = "relu" , _UpperCAmelCase: int = 6 , _UpperCAmelCase: int = 10 , _UpperCAmelCase: int = 8 , _UpperCAmelCase: float = 0.0 , _UpperCAmelCase: int = 2048 , _UpperCAmelCase: bool = False , _UpperCAmelCase: bool = False , _UpperCAmelCase: int = 4 , _UpperCAmelCase: int = 255 , _UpperCAmelCase: int = 100 , _UpperCAmelCase: float = 0.1 , _UpperCAmelCase: float = 2.0 , _UpperCAmelCase: float = 5.0 , _UpperCAmelCase: float = 5.0 , _UpperCAmelCase: int = 1_2544 , _UpperCAmelCase: float = 3.0 , _UpperCAmelCase: float = 0.7_5 , _UpperCAmelCase: float = 0.0_2 , _UpperCAmelCase: float = 1.0 , _UpperCAmelCase: bool = True , _UpperCAmelCase: List[int] = [4, 8, 16, 32] , _UpperCAmelCase: bool = None , **_UpperCAmelCase: Optional[int] , ):
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' )
_lowerCAmelCase :List[str] = CONFIG_MAPPING['swin'](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=_UpperCAmelCase , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_lowerCAmelCase :int = backbone_config.pop('model_type' )
_lowerCAmelCase :int = CONFIG_MAPPING[backbone_model_type]
_lowerCAmelCase :Union[str, Any] = config_class.from_dict(_UpperCAmelCase )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
f"""Supported model types: {",".join(self.backbones_supported )}""" )
_lowerCAmelCase :List[Any] = backbone_config
_lowerCAmelCase :Any = feature_size
_lowerCAmelCase :int = mask_feature_size
_lowerCAmelCase :Union[str, Any] = hidden_dim
_lowerCAmelCase :Optional[int] = encoder_feedforward_dim
_lowerCAmelCase :Tuple = activation_function
_lowerCAmelCase :Optional[Any] = encoder_layers
_lowerCAmelCase :int = decoder_layers
_lowerCAmelCase :Tuple = num_attention_heads
_lowerCAmelCase :Tuple = dropout
_lowerCAmelCase :Optional[Any] = dim_feedforward
_lowerCAmelCase :Tuple = pre_norm
_lowerCAmelCase :List[str] = enforce_input_projection
_lowerCAmelCase :Union[str, Any] = common_stride
_lowerCAmelCase :Optional[int] = ignore_value
_lowerCAmelCase :Any = num_queries
_lowerCAmelCase :Any = no_object_weight
_lowerCAmelCase :List[str] = class_weight
_lowerCAmelCase :str = mask_weight
_lowerCAmelCase :Dict = dice_weight
_lowerCAmelCase :int = train_num_points
_lowerCAmelCase :str = oversample_ratio
_lowerCAmelCase :Dict = importance_sample_ratio
_lowerCAmelCase :List[str] = init_std
_lowerCAmelCase :List[Any] = init_xavier_std
_lowerCAmelCase :int = use_auxiliary_loss
_lowerCAmelCase :Tuple = feature_strides
_lowerCAmelCase :Tuple = output_auxiliary_logits
_lowerCAmelCase :Dict = decoder_layers
super().__init__(**_UpperCAmelCase )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls: Any , _UpperCAmelCase: PretrainedConfig , **_UpperCAmelCase: str ):
return cls(
backbone_config=_UpperCAmelCase , **_UpperCAmelCase , )
def SCREAMING_SNAKE_CASE__ ( self: Dict ):
_lowerCAmelCase :Optional[int] = copy.deepcopy(self.__dict__ )
_lowerCAmelCase :List[Any] = self.backbone_config.to_dict()
_lowerCAmelCase :Tuple = self.__class__.model_type
return output
| 382
| 0
|
import math
def res(x: int, y: int) -> float:
    '''simple docstring'''
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any positive power is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError('This should never happen')
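# A minimal worked example: res(2, 100) = 100 * log10(2) ~ 30.10 while
# res(10, 10) = 10 * log10(10) = 10, so 2^100 > 10^10, which is exactly the
# comparison the main block below relies on.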
if __name__ == "__main__": # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = 'Enter the base and the power separated by a comma: '
    x1, y1 = map(int, input(prompt).split(','))
    x2, y2 = map(int, input(prompt).split(','))
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)
    # We check for the largest number
    if res1 > res2:
        print('Largest number is', x1, '^', y1)
    elif res2 > res1:
        print('Largest number is', x2, '^', y2)
    else:
        print('Both are equal')
| 106
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config
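# A quick illustration of the size lookup above (my own sanity check, not part
# of the original script):
#
#   config = get_upernet_config("upernet-convnext-tiny")
#   assert config.backbone_config.depths == [3, 3, 9, 3]
#   assert config.backbone_config.hidden_sizes == [96, 192, 384, 768]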
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.stages.{i}.{j}.gamma", F"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter") )
rename_keys.append((F"backbone.stages.{i}.{j}.depthwise_conv.weight", F"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.depthwise_conv.bias", F"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.norm.weight", F"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.norm.bias", F"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv1.weight", F"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv1.bias", F"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv2.weight", F"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv2.bias", F"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias") )
if i > 0:
rename_keys.append((F"backbone.downsample_layers.{i}.0.weight", F"backbone.encoder.stages.{i}.downsampling_layer.0.weight") )
rename_keys.append((F"backbone.downsample_layers.{i}.0.bias", F"backbone.encoder.stages.{i}.downsampling_layer.0.bias") )
rename_keys.append((F"backbone.downsample_layers.{i}.1.weight", F"backbone.encoder.stages.{i}.downsampling_layer.1.weight") )
rename_keys.append((F"backbone.downsample_layers.{i}.1.bias", F"backbone.encoder.stages.{i}.downsampling_layer.1.bias") )
rename_keys.append((F"backbone.norm{i}.weight", F"backbone.hidden_states_norms.stage{i+1}.weight") )
rename_keys.append((F"backbone.norm{i}.bias", F"backbone.hidden_states_norms.stage{i+1}.bias") )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
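# A minimal sketch of how rename_key is used (hypothetical dictionary, not an
# actual checkpoint):
#
#   sd = {"decode_head.conv_seg.weight": 1}
#   rename_key(sd, "decode_head.conv_seg.weight", "decode_head.classifier.weight")
#   assert sd == {"decode_head.classifier.weight": 1}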
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)

    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]])
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8_236, -8.8_236, -8.6_771], [-8.8_236, -8.8_236, -8.6_771], [-8.7_638, -8.7_638, -8.6_240]])
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8_558, -8.8_558, -8.6_905], [-8.8_558, -8.8_558, -8.6_905], [-8.7_669, -8.7_669, -8.6_021]])
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6_660, -8.6_660, -8.6_210], [-8.6_660, -8.6_660, -8.6_210], [-8.6_310, -8.6_310, -8.5_964]])
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4_980, -8.4_980, -8.3_977], [-8.4_980, -8.4_980, -8.3_977], [-8.4_379, -8.4_379, -8.3_412]])
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-convnext-tiny',
type=str,
choices=[F"""upernet-convnext-{size}""" for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
help='Name of the ConvNext UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 408
| 0
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
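# Illustrative launch commands (my own addition, not part of the original
# header; the flags are the ones defined by this script's argument parser in
# main(), and the file name "tracking.py" is assumed):
#
#   accelerate launch tracking.py                    # single device
#   accelerate launch tracking.py --with_tracking    # enable all trackers
#   accelerate launch tracking.py --mixed_precision fp16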
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
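# A hypothetical usage sketch (names assumed, not from the original file):
#
#   accelerator = Accelerator()
#   train_dl, eval_dl = get_dataloaders(accelerator, batch_size=16)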
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator

    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 700
|
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class __A ( lowerCamelCase__ ):
"""simple docstring"""
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(a__ , '''hidden_sizes'''))
self.parent.assertTrue(hasattr(a__ , '''num_attention_heads'''))
class __A :
"""simple docstring"""
def __init__( self , a__ , a__=13 , a__=64 , a__=3 , a__=3 , a__=2 , a__=1 , a__=16 , a__=[128, 256, 384] , a__=[4, 6, 8] , a__=[2, 3, 4] , a__=[16, 16, 16] , a__=0 , a__=[2, 2, 2] , a__=[2, 2, 2] , a__=0.02 , a__=True , a__=True , a__=2 , ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = parent
_lowerCamelCase : Union[str, Any] = batch_size
_lowerCamelCase : Dict = image_size
_lowerCamelCase : Dict = num_channels
_lowerCamelCase : Any = kernel_size
_lowerCamelCase : int = stride
_lowerCamelCase : Dict = padding
_lowerCamelCase : Union[str, Any] = hidden_sizes
_lowerCamelCase : Optional[Any] = num_attention_heads
_lowerCamelCase : Any = depths
_lowerCamelCase : List[Any] = key_dim
_lowerCamelCase : Optional[Any] = drop_path_rate
_lowerCamelCase : Union[str, Any] = patch_size
_lowerCamelCase : int = attention_ratio
_lowerCamelCase : Any = mlp_ratio
_lowerCamelCase : List[str] = initializer_range
_lowerCamelCase : Dict = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
_lowerCamelCase : str = is_training
_lowerCamelCase : Tuple = use_labels
_lowerCamelCase : List[Any] = num_labels
_lowerCamelCase : Union[str, Any] = initializer_range
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_lowerCamelCase : str = None
if self.use_labels:
_lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.num_labels)
_lowerCamelCase : List[Any] = self.get_config()
return config, pixel_values, labels
def __snake_case ( self):
"""simple docstring"""
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def __snake_case ( self , a__ , a__ , a__):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = LevitModel(config=a__)
model.to(a__)
model.eval()
_lowerCamelCase : List[str] = model(a__)
_lowerCamelCase : List[str] = (self.image_size, self.image_size)
_lowerCamelCase, _lowerCamelCase : Optional[int] = image_size[0], image_size[1]
for _ in range(4):
_lowerCamelCase : int = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
_lowerCamelCase : Dict = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]) , )
def __snake_case ( self , a__ , a__ , a__):
"""simple docstring"""
_lowerCamelCase : Optional[int] = self.num_labels
_lowerCamelCase : Dict = LevitForImageClassification(a__)
model.to(a__)
model.eval()
_lowerCamelCase : List[Any] = model(a__ , labels=a__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : int = self.prepare_config_and_inputs()
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : List[str] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __A ( lowerCamelCase__ ,lowerCamelCase__ ,unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
UpperCAmelCase__ = (
{
"""feature-extraction""": LevitModel,
"""image-classification""": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = LevitModelTester(self)
_lowerCamelCase : Tuple = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37)
def __snake_case ( self):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __snake_case ( self):
"""simple docstring"""
return
@unittest.skip(reason='''Levit does not use inputs_embeds''')
def __snake_case ( self):
"""simple docstring"""
pass
@unittest.skip(reason='''Levit does not support input and output embeddings''')
def __snake_case ( self):
"""simple docstring"""
pass
@unittest.skip(reason='''Levit does not output attentions''')
def __snake_case ( self):
"""simple docstring"""
pass
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase, _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Any = model_class(a__)
_lowerCamelCase : str = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Dict = [*signature.parameters.keys()]
_lowerCamelCase : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , a__)
def __snake_case ( self):
"""simple docstring"""
def check_hidden_states_output(a__ , a__ , a__):
_lowerCamelCase : List[Any] = model_class(a__)
model.to(a__)
model.eval()
with torch.no_grad():
_lowerCamelCase : Any = model(**self._prepare_for_class(a__ , a__))
_lowerCamelCase : int = outputs.hidden_states
_lowerCamelCase : List[str] = len(self.model_tester.depths) + 1
self.assertEqual(len(a__) , a__)
_lowerCamelCase : Tuple = (self.model_tester.image_size, self.model_tester.image_size)
_lowerCamelCase, _lowerCamelCase : Tuple = image_size[0], image_size[1]
for _ in range(4):
_lowerCamelCase : Optional[Any] = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1)
_lowerCamelCase : List[Any] = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
_lowerCamelCase, _lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : List[Any] = True
check_hidden_states_output(a__ , a__ , a__)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : List[str] = True
check_hidden_states_output(a__ , a__ , a__)
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
def __snake_case ( self):
"""simple docstring"""
pass
def __snake_case ( self , a__ , a__ , a__=False):
"""simple docstring"""
_lowerCamelCase : Dict = super()._prepare_for_class(a__ , a__ , return_labels=a__)
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__)
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__)
def __snake_case ( self):
"""simple docstring"""
if not self.model_tester.is_training:
return
_lowerCamelCase, _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[Any] = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(a__)
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
_lowerCamelCase : Optional[Any] = model_class(a__)
model.to(a__)
model.train()
_lowerCamelCase : Any = self._prepare_for_class(a__ , a__ , return_labels=a__)
_lowerCamelCase : Tuple = model(**a__).loss
loss.backward()
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_lowerCamelCase : Union[str, Any] = False
_lowerCamelCase : List[str] = True
for model_class in self.all_model_classes:
if model_class in get_values(a__) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
_lowerCamelCase : List[Any] = model_class(a__)
model.gradient_checkpointing_enable()
model.to(a__)
model.train()
_lowerCamelCase : int = self._prepare_for_class(a__ , a__ , return_labels=a__)
_lowerCamelCase : Any = model(**a__).loss
loss.backward()
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase, _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Dict = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(a__),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"""Testing {model_class} with {problem_type['title']}"""):
_lowerCamelCase : Tuple = problem_type['''title''']
_lowerCamelCase : Tuple = problem_type['''num_labels''']
_lowerCamelCase : int = model_class(a__)
model.to(a__)
model.train()
_lowerCamelCase : int = self._prepare_for_class(a__ , a__ , return_labels=a__)
if problem_type["num_labels"] > 1:
_lowerCamelCase : int = inputs['''labels'''].unsqueeze(1).repeat(1 , problem_type['''num_labels'''])
_lowerCamelCase : Tuple = inputs['''labels'''].to(problem_type['''dtype'''])
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=a__) as warning_list:
_lowerCamelCase : Any = model(**a__).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message):
raise ValueError(
F"""Something is going wrong in the regression problem: intercepted {w.message}""")
loss.backward()
@slow
def __snake_case ( self):
"""simple docstring"""
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : int = LevitModel.from_pretrained(a__)
self.assertIsNotNone(a__)
def __UpperCAmelCase( ):
_lowerCamelCase : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __A ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __snake_case ( self):
"""simple docstring"""
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
@slow
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Any = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
a__)
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : int = prepare_img()
_lowerCamelCase : str = image_processor(images=a__ , return_tensors='''pt''').to(a__)
# forward pass
with torch.no_grad():
_lowerCamelCase : int = model(**a__)
# verify the logits
_lowerCamelCase : List[str] = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape , a__)
_lowerCamelCase : Optional[Any] = torch.tensor([1.0448, -0.3745, -1.8317]).to(a__)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1e-4))
| 613
| 0
|
from collections import Counter
from timeit import timeit
def _lowercase( __a : str = "" , ):
return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2
def _lowercase( __a : str = "" ):
if len(__a ) == 0:
return True
a__ =input_str.replace(' ' , '' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
a__ ={}
for character in lower_case_input_str:
a__ =character_freq_dict.get(__a , 0 ) + 1
a__ =0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
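# Example behaviour of the two checkers above (my own examples, not from the
# original module): "carerac" can be rearranged into the palindrome "racecar",
# while "hello" cannot.
#
#   assert can_string_be_rearranged_as_palindrome_counter("carerac") is True
#   assert can_string_be_rearranged_as_palindrome("hello") is False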
def _lowercase( __a : str = "" ):
print('\nFor string = ' , __a , ':' )
print(
'> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(__a ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
print(
'> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(__a ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
| 20
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
device = torch.device("cpu")
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
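# Tracing the rules above on one hypothetical checkpoint key (my own example,
# not from the original script):
#
#   "network.0.0.pwconv1.weight"
#     -> ".pwconv" is rewritten, then the "network" branch rebuilds the prefix:
#   "swiftformer.encoder.network.0.blocks.0.point_wise_conv1.weight"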
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 20
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 718
|
"""simple docstring"""
import argparse
import struct
import unittest
class SHA256:
    def __init__(self, data):
        self.data = data
        # Initialize hash values
        self.hashes = [
0x6A09_E667,
0xBB67_AE85,
0x3C6E_F372,
0xA54F_F53A,
0x510E_527F,
0x9B05_688C,
0x1F83_D9AB,
0x5BE0_CD19,
]
# Initialize round constants
        self.round_constants = [
0x428A_2F98,
0x7137_4491,
0xB5C0_FBCF,
0xE9B5_DBA5,
0x3956_C25B,
0x59F1_11F1,
0x923F_82A4,
0xAB1C_5ED5,
0xD807_AA98,
0x1283_5B01,
0x2431_85BE,
0x550C_7DC3,
0x72BE_5D74,
0x80DE_B1FE,
0x9BDC_06A7,
0xC19B_F174,
0xE49B_69C1,
0xEFBE_4786,
0x0FC1_9DC6,
0x240C_A1CC,
0x2DE9_2C6F,
0x4A74_84AA,
0x5CB0_A9DC,
0x76F9_88DA,
0x983E_5152,
0xA831_C66D,
0xB003_27C8,
0xBF59_7FC7,
0xC6E0_0BF3,
0xD5A7_9147,
0x06CA_6351,
0x1429_2967,
0x27B7_0A85,
0x2E1B_2138,
0x4D2C_6DFC,
0x5338_0D13,
0x650A_7354,
0x766A_0ABB,
0x81C2_C92E,
0x9272_2C85,
0xA2BF_E8A1,
0xA81A_664B,
0xC24B_8B70,
0xC76C_51A3,
0xD192_E819,
0xD699_0624,
0xF40E_3585,
0x106A_A070,
0x19A4_C116,
0x1E37_6C08,
0x2748_774C,
0x34B0_BCB5,
0x391C_0CB3,
0x4ED8_AA4A,
0x5B9C_CA4F,
0x682E_6FF3,
0x748F_82EE,
0x78A5_636F,
0x84C8_7814,
0x8CC7_0208,
0x90BE_FFFA,
0xA450_6CEB,
0xBEF9_A3F7,
0xC671_78F2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()
    @staticmethod
    def preprocessing(data):
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
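    # A minimal worked example of the padding rule above (my own note, not
    # from the original module): for the 11-byte message b"Test String",
    # 63 - (11 + 8) % 64 = 44 zero bytes follow the 0x80 marker, and the
    # 8-byte big-endian bit length (88) completes exactly one 64-byte block:
    # 11 + 1 + 44 + 8 = 64.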
    def final_hash(self):
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_0000_0000
                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFF_FFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_0000_0000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_0000_0000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_0000_0000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_0000_0000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_0000_0000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value, rotations):
        return 0xFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self):
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main():
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file"
    )
    args = parser.parse_args()
    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")

    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
| 16
| 0
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __snake_case :
@staticmethod
def __a ( *A_: str , **A_: int ):
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __snake_case (unittest.TestCase ):
__a = MODEL_FOR_OBJECT_DETECTION_MAPPING
def __a ( self: Tuple , A_: List[Any] , A_: Any , A_: str ):
__lowerCamelCase = ObjectDetectionPipeline(model=lowercase_ , image_processor=lowercase_ )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __a ( self: str , A_: Optional[Any] , A_: Optional[int] ):
__lowerCamelCase = object_detector("""./tests/fixtures/tests_samples/COCO/000000039769.png""" , threshold=0.0 )
self.assertGreater(len(lowercase_ ) , 0 )
for detected_object in outputs:
self.assertEqual(
lowercase_ , {
"""score""": ANY(lowercase_ ),
"""label""": ANY(lowercase_ ),
"""box""": {"""xmin""": ANY(lowercase_ ), """ymin""": ANY(lowercase_ ), """xmax""": ANY(lowercase_ ), """ymax""": ANY(lowercase_ )},
} , )
import datasets
__lowerCamelCase = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
__lowerCamelCase = [
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
]
__lowerCamelCase = object_detector(lowercase_ , threshold=0.0 )
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
for outputs in batch_outputs:
self.assertGreater(len(lowercase_ ) , 0 )
for detected_object in outputs:
self.assertEqual(
lowercase_ , {
"""score""": ANY(lowercase_ ),
"""label""": ANY(lowercase_ ),
"""box""": {"""xmin""": ANY(lowercase_ ), """ymin""": ANY(lowercase_ ), """xmax""": ANY(lowercase_ ), """ymax""": ANY(lowercase_ )},
} , )
@require_tf
@unittest.skip("""Object detection not implemented in TF""" )
def __a ( self: List[str] ):
pass
@require_torch
def __a ( self: Tuple ):
__lowerCamelCase = """hf-internal-testing/tiny-detr-mobilenetsv3"""
__lowerCamelCase = AutoModelForObjectDetection.from_pretrained(lowercase_ )
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(lowercase_ )
__lowerCamelCase = ObjectDetectionPipeline(model=lowercase_ , feature_extractor=lowercase_ )
__lowerCamelCase = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=0.0 )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
{"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 1_59, """ymin""": 1_20, """xmax""": 4_80, """ymax""": 3_59}},
{"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 1_59, """ymin""": 1_20, """xmax""": 4_80, """ymax""": 3_59}},
] , )
__lowerCamelCase = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
[
{"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 1_59, """ymin""": 1_20, """xmax""": 4_80, """ymax""": 3_59}},
{"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 1_59, """ymin""": 1_20, """xmax""": 4_80, """ymax""": 3_59}},
],
[
{"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 1_59, """ymin""": 1_20, """xmax""": 4_80, """ymax""": 3_59}},
{"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 1_59, """ymin""": 1_20, """xmax""": 4_80, """ymax""": 3_59}},
],
] , )
@require_torch
@slow
def __a ( self: Dict ):
__lowerCamelCase = """facebook/detr-resnet-50"""
__lowerCamelCase = AutoModelForObjectDetection.from_pretrained(lowercase_ )
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(lowercase_ )
__lowerCamelCase = ObjectDetectionPipeline(model=lowercase_ , feature_extractor=lowercase_ )
__lowerCamelCase = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
{"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 1_75, """ymax""": 1_17}},
{"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 3_33, """ymin""": 72, """xmax""": 3_68, """ymax""": 1_87}},
{"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_39, """ymax""": 4_73}},
{"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 3_14, """ymax""": 4_70}},
{"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 3_45, """ymin""": 23, """xmax""": 6_40, """ymax""": 3_68}},
] , )
__lowerCamelCase = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
[
{"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 1_75, """ymax""": 1_17}},
{"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 3_33, """ymin""": 72, """xmax""": 3_68, """ymax""": 1_87}},
{"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_39, """ymax""": 4_73}},
{"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 3_14, """ymax""": 4_70}},
{"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 3_45, """ymin""": 23, """xmax""": 6_40, """ymax""": 3_68}},
],
[
{"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 1_75, """ymax""": 1_17}},
{"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 3_33, """ymin""": 72, """xmax""": 3_68, """ymax""": 1_87}},
{"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_39, """ymax""": 4_73}},
{"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 3_14, """ymax""": 4_70}},
{"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 3_45, """ymin""": 23, """xmax""": 6_40, """ymax""": 3_68}},
],
] , )
@require_torch
@slow
def __a ( self: Dict ):
__lowerCamelCase = """facebook/detr-resnet-50"""
__lowerCamelCase = pipeline("""object-detection""" , model=lowercase_ )
__lowerCamelCase = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
{"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 1_75, """ymax""": 1_17}},
{"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 3_33, """ymin""": 72, """xmax""": 3_68, """ymax""": 1_87}},
{"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_39, """ymax""": 4_73}},
{"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 3_14, """ymax""": 4_70}},
{"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 3_45, """ymin""": 23, """xmax""": 6_40, """ymax""": 3_68}},
] , )
__lowerCamelCase = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
[
{"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 1_75, """ymax""": 1_17}},
{"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 3_33, """ymin""": 72, """xmax""": 3_68, """ymax""": 1_87}},
{"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_39, """ymax""": 4_73}},
{"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 3_14, """ymax""": 4_70}},
{"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 3_45, """ymin""": 23, """xmax""": 6_40, """ymax""": 3_68}},
],
[
{"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 1_75, """ymax""": 1_17}},
{"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 3_33, """ymin""": 72, """xmax""": 3_68, """ymax""": 1_87}},
{"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_39, """ymax""": 4_73}},
{"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 3_14, """ymax""": 4_70}},
{"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 3_45, """ymin""": 23, """xmax""": 6_40, """ymax""": 3_68}},
],
] , )
@require_torch
@slow
def __a ( self: Optional[int] ):
__lowerCamelCase = 0.9_985
__lowerCamelCase = """facebook/detr-resnet-50"""
__lowerCamelCase = pipeline("""object-detection""" , model=lowercase_ )
__lowerCamelCase = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=lowercase_ )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
{"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 3_14, """ymax""": 4_70}},
{"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 3_45, """ymin""": 23, """xmax""": 6_40, """ymax""": 3_68}},
] , )
@require_torch
@require_pytesseract
@slow
def __a ( self: Optional[Any] ):
__lowerCamelCase = """Narsil/layoutlmv3-finetuned-funsd"""
__lowerCamelCase = 0.9_993
__lowerCamelCase = pipeline("""object-detection""" , model=lowercase_ , threshold=lowercase_ )
__lowerCamelCase = object_detector(
"""https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""" )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
{"""score""": 0.9_993, """label""": """I-ANSWER""", """box""": {"""xmin""": 2_94, """ymin""": 2_54, """xmax""": 3_43, """ymax""": 2_64}},
{"""score""": 0.9_993, """label""": """I-ANSWER""", """box""": {"""xmin""": 2_94, """ymin""": 2_54, """xmax""": 3_43, """ymax""": 2_64}},
] , )
| 281
|
import argparse
import copy
def generate_neighbours(path):
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]])
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]])
    return dict_of_neighbours
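# Assumed input format (inferred from the parser above, not documented in the
# original): one whitespace-separated edge per line, "node_a node_b distance".
# For example, a file containing
#
#   a b 20
#   a c 18
#   b c 10
#
# yields dict_of_neighbours == {"a": [["b", "20"], ["c", "18"]],
#                               "b": [["a", "20"], ["c", "10"]],
#                               "c": [["a", "18"], ["b", "10"]]}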
def generate_first_solution(path, dict_of_neighbours):
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10_000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10_000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

            if len(tabu_list) >= size:
                tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
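# Illustrative usage (not part of the original script): the input file is
# expected to contain one undirected edge per line as "node_a node_b distance",
# for example:
#
#   a b 20
#   a c 18
#   b c 10
#
# The script could then be invoked as, say:
#   python tabu_search.py -f tabudata.txt -i 4 -s 3
# The file name and parameter values here are made up for the example.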
| 318
| 0
|
"""simple docstring"""
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0
def main() -> None:
    s = input('Enter sequence of brackets: ')
    if is_balanced(s):
        print(s, 'is balanced')
    else:
        print(s, 'is not balanced')
if __name__ == "__main__":
main()
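# Quick sanity checks (illustrative, not in the original file):
#   is_balanced("{[()]}")  ->  True   (every bracket closes in the right order)
#   is_balanced("([)]")    ->  False  (")" arrives while "[" is still open)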
| 104
|
"""simple docstring"""
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8  # flag to check if a key is an arrow key
KEYMAP = {
"tab": ord("\t"),
"newline": ord("\r"),
"esc": 27,
"up": 65 + ARROW_KEY_FLAG,
"down": 66 + ARROW_KEY_FLAG,
"right": 67 + ARROW_KEY_FLAG,
"left": 68 + ARROW_KEY_FLAG,
"mod_int": 91,
"undefined": sys.maxsize,
"interrupt": 3,
"insert": 50,
"delete": 51,
"pg_up": 53,
"pg_down": 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Gets raw characters from inputs"""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Gets a character from the keyboard and returns the key code"""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
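# Illustrative usage (sketch, not part of the original module): a simple input
# loop that distinguishes printable keys from the decoded arrow-key codes.
#
#   while True:
#       key = get_character()
#       if key == chr(KEYMAP["interrupt"]):
#           break
#       if isinstance(key, str) and ord(key) in (KEYMAP["up"], KEYMAP["down"]):
#           print("arrow key")
#       else:
#           print("key:", key)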
| 104
| 1
|
'''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """A CLIPTokenizer that can map one placeholder token to several learned sub-tokens."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent"
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
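# Illustrative usage (sketch; the checkpoint name is an example, not mandated
# by this file):
#   tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
#   tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
#   # "<cat-toy>" now expands to "<cat-toy>_0 ... <cat-toy>_3" before encoding
#   ids = tokenizer("A photo of <cat-toy>").input_ids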
| 48
|
def simplify(current_set: list[list]) -> list[list]:
    # Divide each row by the magnitude of its first term, creating a "unit" matrix
    current_set = current_set.copy()
    for row_index, row in enumerate(current_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set
def solve_simultaneous(equations: list[list]) -> list:
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
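# For the symmetric system above, every equation has the form x_i + S = b_i,
# where S is the sum of all five unknowns; summing the equations gives 6S = 30,
# so S = 5 and x_i = b_i - 5. The first print should therefore show
# [-1.0, 0.0, 1.0, 2.0, 3.0], and the second [0.5] (from 4x = 2).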
| 587
| 0
|
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states
class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states
class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
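# Minimal shape check (illustrative sketch, not part of the original file):
#   import jax
#   block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
#   x = jnp.zeros((1, 8, 8, 32))          # NHWC, as expected by nn.Conv
#   temb = jnp.zeros((1, 128))            # time embedding, any feature size
#   params = block.init(jax.random.PRNGKey(0), x, temb)
#   y = block.apply(params, x, temb)      # -> shape (1, 8, 8, 64)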
| 28
|
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
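# Minimal subclass sketch (illustrative; `parser` is assumed to be the
# argparse sub-parsers object handed in by the CLI entry point):
#   class HelloCommand(BaseTransformersCLICommand):
#       @staticmethod
#       def register_subcommand(parser):
#           hello_parser = parser.add_parser("hello")
#           hello_parser.set_defaults(func=lambda args: HelloCommand())
#
#       def run(self):
#           print("hello from the CLI")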
| 28
| 1
|
'''simple docstring'''
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)

STOPPING_CRITERIA_INPUTS_DOCSTRING = r'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
'''
class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None
def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
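# Illustrative usage (sketch; `model` and `input_ids` are placeholders):
#   criteria = StoppingCriteriaList(
#       [MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)]
#   )
#   outputs = model.generate(input_ids, stopping_criteria=criteria)
# Generation stops as soon as any single criterion returns True.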
| 274
|
'''simple docstring'''
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
cpu_peak_tracker = PeakCPUMemory()
def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures
def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures
def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
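# Illustrative usage (sketch; `heavy_computation` is a placeholder):
#   start_measures = start_measure()
#   heavy_computation()
#   measures = end_measure(start_measures)
#   log_measures(measures, "heavy_computation")
# All memory figures reported by end_measure are deltas in MiB (2**20 bytes).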
| 274
| 1
|
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    """Returns the sum of truncated primes."""
    return sum(compute_truncated_primes(11))
if __name__ == "__main__":
    print(f"{sum(compute_truncated_primes(11)) = }")
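# For reference, the eleven truncatable primes are 23, 37, 53, 73, 313, 317,
# 373, 797, 3137, 3797 and 739397, so the printed sum is 748317.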
| 700
|
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
UpperCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)

        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
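# Illustrative usage (the checkpoint name is an example, not mandated by this
# file):
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   result["depth"].save("depth.png")   # PIL image, normalized to the 0-255 range
#   result["predicted_depth"]           # raw model output tensor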
| 439
| 0
|
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
A_ : Tuple = logging.getLogger()
A_ : Any = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {'source': 'What is love ?', 'target': 'life'}
        n_lines = {'train': 12, 'val': 2, 'test': 2}

        # Copy data to the expected one-example-per-line format
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = '\n'.join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f'''{split}.{field}''') , 'w' ) as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, 'output')
        data_dir = os.path.join(tmp_dir, 'data')
        self._create_dummy_data(data_dir=data_dir)

        testargs = f'''
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
'''.split()
if gpus > 0:
testargs.append(f'''--gpus={gpus}''' )
if is_apex_available():
testargs.append('--fp16' )
else:
testargs.append('--gpus=0' )
testargs.append('--distributed_backend=ddp_cpu' )
testargs.append('--num_processes=2' )
        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, 'metrics.json')
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result
    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever='ray')
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever='ray')
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)
| 57
|
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all numbers before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
| 551
| 0
|
"""simple docstring"""
def nand_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
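# Expected console output, line by line: 1, 1, 1, 0 -- NAND is 0 only when
# both inputs are 1.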
| 128
|
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {\'spearmanr\': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results[\'spearmanr\'])\n -0.7\n >>> print(round(results[\'spearmanr_pvalue\'], 2))\n 0.19\n'
_CITATION = R'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 128
| 1
|
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if it is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is already the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty summary."""
        raw_story = """It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this."""
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns empty story and summary lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
| 79
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_m2m_100""": ["""M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP""", """M2M100Config""", """M2M100OnnxConfig"""],
"""tokenization_m2m_100""": ["""M2M100Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
"""M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""M2M100ForConditionalGeneration""",
"""M2M100Model""",
"""M2M100PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 349
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 424
|
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel('Iterations')
        plt.ylabel('Function values')
        plt.show()
    return best_state
if __name__ == "__main__":
    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
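# Note on the acceptance rule above (illustrative numbers): a worsening move
# with change = -2 at temperature current_temp = 100 is accepted with
# probability e^(-2/100) ~ 0.98, while the same move at current_temp = 1 is
# accepted with probability e^(-2) ~ 0.14, so the search becomes greedier as
# the temperature decays.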
| 424
| 1
|
"""simple docstring"""
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13, seq_length=7, is_training=True,
        use_input_mask=True, use_token_type_ids=True, use_labels=True,
        vocab_size=99, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_multiple_size=4, hidden_act="gelu",
        hidden_dropout=0.0, attention_dropout=0.1, weight_tying=True,
        max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2,
        initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelJapaneseTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': GPTNeoXJapaneseModel, 'text-generation': GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        expected_outputs = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]
        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, expected_outputs)
| 160
|
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
    if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds


@pytest.mark.integration
def test_as_streaming_dataset(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
| 160
| 1
|
"""simple docstring"""
def _UpperCamelCase ( UpperCamelCase = 10**9 ) -> int:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = 1
__UpperCAmelCase : str = 2
__UpperCAmelCase : str = 0
__UpperCAmelCase : Union[str, Any] = 0
__UpperCAmelCase : List[Any] = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
__UpperCAmelCase : str = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f'''{solution() = }''')
| 487
|
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order low-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order high-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order band-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order all-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a peak (peaking EQ) filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
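
# Minimal usage sketch (not in the original module). It assumes the IIRFilter
# class from audio_filters.iir_filter exposes process(sample: float) -> float,
# as in its reference implementation.
if __name__ == "__main__":
    lowpass = make_lowpass(frequency=1_000, samplerate=48_000)
    # Feed a unit impulse through the filter and print the first few samples
    # of the impulse response.
    impulse = [1.0] + [0.0] * 7
    print([lowpass.process(sample) for sample in impulse])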
| 487
| 1
|
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 659
|
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    """Applies the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences or overwrite the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code_lines = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code_lines)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
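
# Illustrative note (not part of the original script): `is_copy_consistent`
# looks for markers of the following shape above a copied definition; the
# class names here are hypothetical:
#
#   # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock
#
# The optional `with A->B` suffix is applied to the original code before the
# comparison; appending `all-casing` also replaces the lowercase and UPPERCASE
# variants of the two names.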
| 659
| 1
|
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    """Map a ParlAI state dict key to its Hugging Face equivalent."""
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Copy/paste/tweak a ParlAI Blenderbot checkpoint into the Hugging Face format."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
UpperCamelCase__ = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
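
# Example invocation (a sketch; the paths are placeholders and the script name
# assumes this file is saved as convert_blenderbot_checkpoint.py):
#   python convert_blenderbot_checkpoint.py \
#       --src_path blenderbot-model.bin \
#       --save_dir hf_blenderbot \
#       --hf_config_json blenderbot-3b-config.json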
| 719
|
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
| 548
| 0
|
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid function of a float, or its derivative if deriv is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Return the value found after the forward propagation training."""
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
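
# Note added for clarity (not in the original file): the logistic sigmoid
# satisfies s'(x) = s(x) * (1 - s(x)), so with deriv=True the function expects
# the already-computed activation s(x) as its input. That is why the error
# delta above is layer_1_error * sigmoid_function(layer_1, True).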
| 670
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
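
# Minimal usage sketch (not part of the original module). It assumes a
# pretrained checkpoint that bundles a CLIP image processor with an
# XLM-Roberta tokenizer; the model identifier below is illustrative:
#
#   from transformers import AltCLIPProcessor
#
#   processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   # inputs now contains input_ids / attention_mask from the tokenizer and
#   # pixel_values from the image processor.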
| 670
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 711
|
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    """
    Change the brightness of a PIL Image to a given level.
    """

    def brightness(c: int) -> float:
        """Fundamental transformation applied to every pixel value."""
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
| 45
| 0
|
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
_SCREAMING_SNAKE_CASE = TypeVar("T")
_SCREAMING_SNAKE_CASE = TypeVar("U")
class DoubleLinkedListNode(Generic[T, U]):
    """Double linked list node built specifically for the LRU cache below."""

    def __init__(self, key: T | None, val: U | None) -> None:
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    """Double linked list with sentinel head and rear nodes."""

    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """Add the given node to the end of the list (before rear)."""
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        """Remove and return the given node; return None if it is not linked."""
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    """LRU Cache to store a given capacity of key/value pairs."""

    # class variable mapping decorated functions to their cache instance
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        """Return the value for the given key, or None."""
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        """Set the value for the given key, evicting the oldest entry when over capacity."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0;
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert self.list.remove(first_node) is not None  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        """Decorator version of the LRU Cache."""

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)

                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010

            return cache_decorator_wrapper

        return cache_decorator_inner
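
# Usage sketch for the decorator (adapted from the reference doctests; `fib`
# is an illustrative client function, not part of the classes above):
#
#   @LRUCache.decorator(100)
#   def fib(num: int) -> int:
#       if num in (1, 2):
#           return 1
#       return fib(num - 1) + fib(num - 2)
#
#   fib(25)            # heavily reuses cached subproblems
#   fib.cache_info()   # CacheInfo(hits=..., misses=..., capacity=100, ...)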
if __name__ == "__main__":
import doctest
doctest.testmod()
| 366
|
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
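
# Example invocation (a sketch; the data paths are placeholders and the script
# name assumes this file is saved as pack_dataset.py):
#   python pack_dataset.py --tok_name facebook/bart-large-cnn \
#       --max_seq_len 1024 --data_dir ./cnn_dm --save_path ./cnn_dm_packed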
| 625
| 0
|
"""simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)


class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
| 709
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 463
| 0
|
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find where function becomes 0 in [a, b] using the bisection method."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
| 84
|
"""simple docstring"""
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of function using the secant (intersection) method."""
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
| 227
| 0
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias''') )
rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.stages.{i}.{j}.gamma', f'backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter') )
rename_keys.append((f'backbone.stages.{i}.{j}.depthwise_conv.weight', f'backbone.encoder.stages.{i}.layers.{j}.dwconv.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.depthwise_conv.bias', f'backbone.encoder.stages.{i}.layers.{j}.dwconv.bias') )
rename_keys.append((f'backbone.stages.{i}.{j}.norm.weight', f'backbone.encoder.stages.{i}.layers.{j}.layernorm.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.norm.bias', f'backbone.encoder.stages.{i}.layers.{j}.layernorm.bias') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv1.weight', f'backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv1.bias', f'backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv2.weight', f'backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv2.bias', f'backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias') )
if i > 0:
rename_keys.append((f'backbone.downsample_layers.{i}.0.weight', f'backbone.encoder.stages.{i}.downsampling_layer.0.weight') )
rename_keys.append((f'backbone.downsample_layers.{i}.0.bias', f'backbone.encoder.stages.{i}.downsampling_layer.0.bias') )
rename_keys.append((f'backbone.downsample_layers.{i}.1.weight', f'backbone.encoder.stages.{i}.downsampling_layer.1.weight') )
rename_keys.append((f'backbone.downsample_layers.{i}.1.bias', f'backbone.encoder.stages.{i}.downsampling_layer.1.bias') )
rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)

    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]]
        )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]]
        )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]]
        )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
__snake_case :Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[f'upernet-convnext-{size}' for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__snake_case :Union[str, Any] = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
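
# Example invocation (a sketch; the output path is a placeholder and the
# script name assumes this file is saved as convert_upernet_convnext.py):
#   python convert_upernet_convnext.py --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path ./upernet-convnext-tiny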
| 700
|
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 60
| 0
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 596
|
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
lowerCAmelCase__ = 'CompVis/stable-diffusion-v1-1'
lowerCAmelCase__ = 'CompVis/stable-diffusion-v1-2'
lowerCAmelCase__ = 'CompVis/stable-diffusion-v1-3'
lowerCAmelCase__ = 'CompVis/stable-diffusion-v1-4'
class __lowercase (__lowerCamelCase ):
def __init__( self : Optional[Any] , UpperCAmelCase_ : AutoencoderKL , UpperCAmelCase_ : CLIPTextModel , UpperCAmelCase_ : CLIPTokenizer , UpperCAmelCase_ : UNetaDConditionModel , UpperCAmelCase_ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCAmelCase_ : StableDiffusionSafetyChecker , UpperCAmelCase_ : CLIPImageProcessor , UpperCAmelCase_ : bool = True , ):
super()._init_()
UpperCamelCase__ : int = StableDiffusionPipeline.from_pretrained(UpperCAmelCase_)
UpperCamelCase__ : Dict = StableDiffusionPipeline.from_pretrained(UpperCAmelCase_)
UpperCamelCase__ : str = StableDiffusionPipeline.from_pretrained(UpperCAmelCase_)
UpperCamelCase__ : List[Any] = StableDiffusionPipeline(
vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , requires_safety_checker=UpperCAmelCase_ , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea)
@property
def __UpperCamelCase ( self : Optional[Any]):
return {k: getattr(self , UpperCAmelCase_) for k in self.config.keys() if not k.startswith('_')}
def __UpperCamelCase ( self : int , UpperCAmelCase_ : Optional[Union[str, int]] = "auto"):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCamelCase__ : List[str] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCAmelCase_)
def __UpperCamelCase ( self : Any):
self.enable_attention_slicing(None)
@torch.no_grad()
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Union[str, List[str]] , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 50 , UpperCAmelCase_ : float = 7.5 , UpperCAmelCase_ : Optional[Union[str, List[str]]] = None , UpperCAmelCase_ : Optional[int] = 1 , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : Optional[torch.Generator] = None , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase_ : int = 1 , **UpperCAmelCase_ : Optional[int] , ):
return self.pipea(
prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , )
@torch.no_grad()
def __UpperCamelCase ( self : Optional[Any] , UpperCAmelCase_ : Union[str, List[str]] , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 50 , UpperCAmelCase_ : float = 7.5 , UpperCAmelCase_ : Optional[Union[str, List[str]]] = None , UpperCAmelCase_ : Optional[int] = 1 , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : Optional[torch.Generator] = None , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase_ : int = 1 , **UpperCAmelCase_ : Tuple , ):
return self.pipea(
prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , )
@torch.no_grad()
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : Union[str, List[str]] , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 50 , UpperCAmelCase_ : float = 7.5 , UpperCAmelCase_ : Optional[Union[str, List[str]]] = None , UpperCAmelCase_ : Optional[int] = 1 , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : Optional[torch.Generator] = None , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase_ : int = 1 , **UpperCAmelCase_ : str , ):
return self.pipea(
prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , )
@torch.no_grad()
def __UpperCamelCase ( self : Optional[Any] , UpperCAmelCase_ : Union[str, List[str]] , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 50 , UpperCAmelCase_ : float = 7.5 , UpperCAmelCase_ : Optional[Union[str, List[str]]] = None , UpperCAmelCase_ : Optional[int] = 1 , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : Optional[torch.Generator] = None , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase_ : int = 1 , **UpperCAmelCase_ : Dict , ):
return self.pipea(
prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , )
@torch.no_grad()
def __UpperCamelCase ( self : int , UpperCAmelCase_ : Union[str, List[str]] , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 50 , UpperCAmelCase_ : float = 7.5 , UpperCAmelCase_ : Optional[Union[str, List[str]]] = None , UpperCAmelCase_ : Optional[int] = 1 , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : Optional[torch.Generator] = None , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase_ : int = 1 , **UpperCAmelCase_ : Tuple , ):
UpperCamelCase__ : Tuple = 'cuda' if torch.cuda.is_available() else 'cpu'
self.to(UpperCAmelCase_)
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'`height` and `width` must be divisible by 8 but are {height} and {width}.')
# Get first result from Stable Diffusion Checkpoint v1.1
UpperCamelCase__ : Dict = self.textaimg_sda_a(
prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , )
# Get first result from Stable Diffusion Checkpoint v1.2
UpperCamelCase__ : Optional[Any] = self.textaimg_sda_a(
prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , )
# Get first result from Stable Diffusion Checkpoint v1.3
UpperCamelCase__ : Optional[Any] = self.textaimg_sda_a(
prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , )
# Get first result from Stable Diffusion Checkpoint v1.4
UpperCamelCase__ : List[str] = self.textaimg_sda_a(
prompt=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , **UpperCAmelCase_ , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]])
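# Hedged usage sketch: assuming this comparison pipeline is registered as a
# diffusers community pipeline (the custom_pipeline name below is an
# assumption, not taken from this file), it could be driven roughly like:
#
#   from diffusers import DiffusionPipeline
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4",
#       custom_pipeline="stable_diffusion_comparison",  # assumed registry name
#   )
#   pipe.to("cuda")
#   output = pipe(prompt="an astronaut riding a horse", num_inference_steps=50)
#   # output.images holds one image per checkpoint, v1.1 through v1.4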
| 596
| 1
|
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
lowerCAmelCase_ = logging.get_logger(__name__)
def lowerCAmelCase( ):
'''simple docstring'''
lowerCamelCase__ = os.getenv("SM_HP_MP_PARAMETERS" , "{}" )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
lowerCamelCase__ = json.loads(a__ )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
lowerCamelCase__ = os.getenv("SM_FRAMEWORK_PARAMS" , "{}" )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
lowerCamelCase__ = json.loads(a__ )
if not mpi_options.get("sagemaker_mpi_enabled" , a__ ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("smdistributed" ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class snake_case_ ( A__ ):
"""simple docstring"""
__lowerCAmelCase : str =field(
default='''''' , metadata={'''help''': '''Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'''} , )
def __UpperCAmelCase ( self):
super().__post_init__()
warnings.warn(
"`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
"`TrainingArguments` instead." , UpperCamelCase , )
@cached_property
def __UpperCAmelCase ( self):
logger.info("PyTorch: setting up devices")
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"torch.distributed process group is initialized, but local_rank == -1. "
"In order to use Torch DDP, launch your script with `python -m torch.distributed.launch")
if self.no_cuda:
lowerCamelCase__ = torch.device("cpu")
lowerCamelCase__ = 0
elif is_sagemaker_model_parallel_available():
lowerCamelCase__ = smp.local_rank()
lowerCamelCase__ = torch.device("cuda" , UpperCamelCase)
lowerCamelCase__ = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="smddp" , timeout=self.ddp_timeout_delta)
lowerCamelCase__ = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
lowerCamelCase__ = torch.device("cuda" , self.local_rank)
lowerCamelCase__ = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
lowerCamelCase__ = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
lowerCamelCase__ = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="nccl" , timeout=self.ddp_timeout_delta)
lowerCamelCase__ = torch.device("cuda" , self.local_rank)
lowerCamelCase__ = 1
if device.type == "cuda":
torch.cuda.set_device(UpperCamelCase)
return device
@property
def __UpperCAmelCase ( self):
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def __UpperCAmelCase ( self):
return not is_sagemaker_model_parallel_available()
@property
def __UpperCAmelCase ( self):
return False
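# A minimal sketch of the single-process fallback above (local_rank == -1 and
# no SageMaker features enabled): pick cuda:0 when a GPU is visible, else CPU.
if __name__ == "__main__":
    _demo_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(_demo_device)  # e.g. device(type='cpu') on a CPU-only machine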
| 708
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowerCAmelCase_ = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
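# Thanks to the _LazyModule indirection above, the heavy torch/flax submodules
# are only imported on first attribute access; end users import as usual:
#
#   from transformers import SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel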
| 426
| 0
|
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def a_ ( _UpperCAmelCase : Optional[int] ,_UpperCAmelCase : Any ,_UpperCAmelCase : List[Any] ) -> Tuple:
return params[f'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :]
def a_ ( _UpperCAmelCase : Optional[int] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : List[Any]="attention" ) -> Union[str, Any]:
__snake_case : str = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] )
__snake_case : str = k_tmp.reshape(k_tmp.shape[0] ,k_tmp.shape[1] * k_tmp.shape[2] )
__snake_case : List[Any] = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] )
__snake_case : List[str] = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] ,o_tmp.shape[2] )
__snake_case : Any = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] )
__snake_case : Tuple = q_tmp.reshape(q_tmp.shape[0] ,q_tmp.shape[1] * q_tmp.shape[2] )
__snake_case : Tuple = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] )
__snake_case : Dict = v_tmp.reshape(v_tmp.shape[0] ,v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def a_ ( _UpperCAmelCase : str ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : Dict=False ) -> List[str]:
if split_mlp_wi:
__snake_case : Any = params[f'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :]
__snake_case : Optional[Any] = params[f'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :]
__snake_case : Any = (wi_a, wi_a)
else:
__snake_case : Dict = params[f'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :]
__snake_case : Optional[Any] = params[f'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :]
return wi, wo
def a_ ( _UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : List[str] ) -> List[str]:
return params[f'''{prefix}/{prefix}/{layer_name}/scale'''][:, i]
def a_ ( _UpperCAmelCase : dict ,*, _UpperCAmelCase : int ,_UpperCAmelCase : bool ,_UpperCAmelCase : bool = False ) -> Dict:
__snake_case : int = traverse_util.flatten_dict(variables['target'] )
__snake_case : Dict = {'/'.join(k ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
__snake_case : str = 'encoder/encoder/mlp/wi_0/kernel' in old
print('Split MLP:' ,_UpperCAmelCase )
__snake_case : Any = collections.OrderedDict()
# Shared embeddings.
__snake_case : Dict = old['token_embedder/embedding']
# Encoder.
for i in range(_UpperCAmelCase ):
# Block i, layer 0 (Self Attention).
__snake_case : Union[str, Any] = tax_layer_norm_lookup(_UpperCAmelCase ,_UpperCAmelCase ,'encoder' ,'pre_attention_layer_norm' )
__snake_case , __snake_case , __snake_case , __snake_case : Tuple = tax_attention_lookup(_UpperCAmelCase ,_UpperCAmelCase ,'encoder' ,'attention' )
__snake_case : Optional[int] = layer_norm
__snake_case : Union[str, Any] = k.T
__snake_case : Union[str, Any] = o.T
__snake_case : List[str] = q.T
__snake_case : Optional[Any] = v.T
# Block i, layer 1 (MLP).
__snake_case : Any = tax_layer_norm_lookup(_UpperCAmelCase ,_UpperCAmelCase ,'encoder' ,'pre_mlp_layer_norm' )
__snake_case , __snake_case : int = tax_mlp_lookup(_UpperCAmelCase ,_UpperCAmelCase ,'encoder' ,_UpperCAmelCase )
__snake_case : int = layer_norm
if split_mlp_wi:
__snake_case : Any = wi[0].T
__snake_case : Union[str, Any] = wi[1].T
else:
__snake_case : Tuple = wi.T
__snake_case : Tuple = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
__snake_case : List[Any] = tax_relpos_bias_lookup(
_UpperCAmelCase ,_UpperCAmelCase ,'encoder' ).T
__snake_case : int = old['encoder/encoder_norm/scale']
if not scalable_attention:
__snake_case : List[str] = tax_relpos_bias_lookup(
_UpperCAmelCase ,0 ,'encoder' ).T
__snake_case : List[Any] = tax_relpos_bias_lookup(
_UpperCAmelCase ,0 ,'decoder' ).T
if not is_encoder_only:
# Decoder.
for i in range(_UpperCAmelCase ):
# Block i, layer 0 (Self Attention).
__snake_case : List[str] = tax_layer_norm_lookup(_UpperCAmelCase ,_UpperCAmelCase ,'decoder' ,'pre_self_attention_layer_norm' )
__snake_case , __snake_case , __snake_case , __snake_case : List[str] = tax_attention_lookup(_UpperCAmelCase ,_UpperCAmelCase ,'decoder' ,'self_attention' )
__snake_case : Tuple = layer_norm
__snake_case : Union[str, Any] = k.T
__snake_case : List[str] = o.T
__snake_case : List[str] = q.T
__snake_case : int = v.T
# Block i, layer 1 (Cross Attention).
__snake_case : Any = tax_layer_norm_lookup(_UpperCAmelCase ,_UpperCAmelCase ,'decoder' ,'pre_cross_attention_layer_norm' )
__snake_case , __snake_case , __snake_case , __snake_case : int = tax_attention_lookup(_UpperCAmelCase ,_UpperCAmelCase ,'decoder' ,'encoder_decoder_attention' )
__snake_case : Optional[int] = layer_norm
__snake_case : Any = k.T
__snake_case : Optional[Any] = o.T
__snake_case : List[Any] = q.T
__snake_case : Optional[int] = v.T
# Block i, layer 2 (MLP).
__snake_case : Any = tax_layer_norm_lookup(_UpperCAmelCase ,_UpperCAmelCase ,'decoder' ,'pre_mlp_layer_norm' )
__snake_case , __snake_case : List[str] = tax_mlp_lookup(_UpperCAmelCase ,_UpperCAmelCase ,'decoder' ,_UpperCAmelCase )
__snake_case : Tuple = layer_norm
if split_mlp_wi:
__snake_case : Optional[int] = wi[0].T
__snake_case : Union[str, Any] = wi[1].T
else:
__snake_case : int = wi.T
__snake_case : int = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
__snake_case : Union[str, Any] = tax_relpos_bias_lookup(_UpperCAmelCase ,_UpperCAmelCase ,'decoder' ).T
__snake_case : Union[str, Any] = old['decoder/decoder_norm/scale']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
__snake_case : int = old['decoder/logits_dense/kernel'].T
return new
def a_ ( _UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : bool ) -> List[str]:
__snake_case : Dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
__snake_case : Any = state_dict['shared.weight']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
__snake_case : Optional[int] = state_dict['shared.weight']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('Using shared word embeddings as lm_head.' )
__snake_case : List[str] = state_dict['shared.weight']
return state_dict
def a_ ( _UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : int ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : Union[str, Any] ) -> Tuple:
__snake_case : Any = checkpoints.load_tax_checkpoint(_UpperCAmelCase )
__snake_case : Union[str, Any] = convert_tax_to_pytorch(
_UpperCAmelCase ,num_layers=config.num_layers ,is_encoder_only=_UpperCAmelCase ,scalable_attention=_UpperCAmelCase )
__snake_case : Any = make_state_dict(_UpperCAmelCase ,_UpperCAmelCase )
model.load_state_dict(_UpperCAmelCase ,strict=_UpperCAmelCase )
def a_ ( _UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : bool = False ,_UpperCAmelCase : bool = False ,) -> Tuple:
__snake_case : List[Any] = MTaConfig.from_json_file(_UpperCAmelCase )
print(f'''Building PyTorch model from configuration: {config}''' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
__snake_case : Optional[Any] = UMTaEncoderModel(_UpperCAmelCase )
else:
__snake_case : str = UMTaForConditionalGeneration(_UpperCAmelCase )
# Load weights from tf checkpoint
load_tax_weights_in_ta(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(_UpperCAmelCase )
# Verify that we can load the checkpoint.
model.from_pretrained(_UpperCAmelCase )
print('Done' )
if __name__ == "__main__":
A__ : List[str] = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Whether the checkpoint is an encoder-only model.''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
A__ : List[Any] = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
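# Example invocation (a sketch; the script name and paths are placeholders):
#
#   python convert_umt5_checkpoint.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output \
#       --scalable_attention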
| 286
|
'''simple docstring'''
import qiskit
def a_ ( _UpperCAmelCase : int = 2 ) -> qiskit.result.counts.Counts:
__snake_case : Union[str, Any] = qubits
# Using Aer's simulator
__snake_case : List[Any] = qiskit.Aer.get_backend('aer_simulator' )
# Creating a Quantum Circuit acting on the q register
__snake_case : Dict = qiskit.QuantumCircuit(_UpperCAmelCase ,_UpperCAmelCase )
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0 )
for i in range(1 ,_UpperCAmelCase ):
# Adding CX (CNOT) gate
circuit.cx(i - 1 , i )
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(_UpperCAmelCase ) ) ,list(range(_UpperCAmelCase ) ) )
# Now measuring any one qubit would affect other qubits to collapse
# their super position and have same state as the measured one.
# Executing the circuit on the simulator
__snake_case : Optional[Any] = qiskit.execute(_UpperCAmelCase ,_UpperCAmelCase ,shots=10_00 )
return job.result().get_counts(_UpperCAmelCase )
if __name__ == "__main__":
print(F"""Total count for various states are: {quantum_entanglement(3)}""")
| 286
| 1
|
import argparse
import os
import re
import packaging.version
lowercase_ : Optional[int] = '''examples/'''
lowercase_ : List[Any] = {
'''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''),
'''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
lowercase_ : int = {
'''init''': '''src/diffusers/__init__.py''',
'''setup''': '''setup.py''',
}
lowercase_ : int = '''README.md'''
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
with open(__lowerCAmelCase , 'r' , encoding='utf-8' , newline='\n' ) as f:
_snake_case : Optional[int] = f.read()
_snake_case : Dict = REPLACE_PATTERNS[pattern]
_snake_case : Tuple = replace.replace('VERSION' , __lowerCAmelCase )
_snake_case : str = re_pattern.sub(__lowerCAmelCase , __lowerCAmelCase )
with open(__lowerCAmelCase , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(__lowerCAmelCase )
def A__( __lowerCAmelCase ):
for folder, directories, fnames in os.walk(__lowerCAmelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('research_projects' )
if "legacy" in directories:
directories.remove('legacy' )
for fname in fnames:
if fname.endswith('.py' ):
update_version_in_file(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase , pattern='examples' )
def A__( __lowerCAmelCase , __lowerCAmelCase=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if not patch:
update_version_in_examples(__lowerCAmelCase )
def A__( ):
_snake_case : List[Any] = '🤗 Transformers currently provides the following architectures'
_snake_case : List[Any] = '1. Want to contribute a new model?'
with open(__lowerCAmelCase , 'r' , encoding='utf-8' , newline='\n' ) as f:
_snake_case : str = f.readlines()
# Find the start of the list.
_snake_case : int = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
_snake_case : Tuple = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('1.' ):
_snake_case : Tuple = lines[index].replace(
'https://huggingface.co/docs/diffusers/main/model_doc' , 'https://huggingface.co/docs/diffusers/model_doc' , )
index += 1
with open(__lowerCAmelCase , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(__lowerCAmelCase )
def A__( ):
with open(REPLACE_FILES['init'] , 'r' ) as f:
_snake_case : Optional[int] = f.read()
_snake_case : Tuple = REPLACE_PATTERNS['init'][0].search(__lowerCAmelCase ).groups()[0]
return packaging.version.parse(__lowerCAmelCase )
def A__( __lowerCAmelCase=False ):
_snake_case : Optional[Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
if default_version.is_devrelease:
_snake_case : int = default_version.base_version
elif patch:
_snake_case : Optional[Any] = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
_snake_case : Any = F'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
_snake_case : List[Any] = input(F'''Which version are you releasing? [{default_version}]''' )
if len(__lowerCAmelCase ) == 0:
_snake_case : Tuple = default_version
print(F'''Updating version to {version}.''' )
global_version_update(__lowerCAmelCase , patch=__lowerCAmelCase )
def A__( ):
_snake_case : Tuple = get_version()
_snake_case : List[Any] = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
_snake_case : List[Any] = current_version.base_version
# Check with the user we got that right.
_snake_case : Union[str, Any] = input(F'''Which version are we developing now? [{dev_version}]''' )
if len(__lowerCAmelCase ) == 0:
_snake_case : Dict = dev_version
print(F'''Updating version to {version}.''' )
global_version_update(__lowerCAmelCase )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
lowercase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
lowercase_ : int = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
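# Illustration (a sketch) of how the 'init' pattern above rewrites a version
# line; the replacement is supplied the same way update_version_in_file does
# after substituting VERSION:
#
#   >>> import re
#   >>> pat = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
#   >>> pat.sub('__version__ = "0.19.0"', '__version__ = "0.19.0.dev0"')
#   '__version__ = "0.19.0"'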
| 713
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase_ : Any = {
'''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''],
'''tokenization_canine''': ['''CanineTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[Any] = [
'''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CanineForMultipleChoice''',
'''CanineForQuestionAnswering''',
'''CanineForSequenceClassification''',
'''CanineForTokenClassification''',
'''CanineLayer''',
'''CanineModel''',
'''CaninePreTrainedModel''',
'''load_tf_weights_in_canine''',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
lowercase_ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 652
| 0
|
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class _UpperCamelCase (tf.keras.layers.Layer ):
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None )-> Optional[int]:
super().__init__()
__lowerCAmelCase = pad_token_id
__lowerCAmelCase = max_length
__lowerCAmelCase = vocab
__lowerCAmelCase = merges
__lowerCAmelCase = BytePairTokenizer(__UpperCamelCase , __UpperCamelCase , sequence_length=__UpperCamelCase )
@classmethod
def __UpperCAmelCase ( cls , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )-> str:
__lowerCAmelCase = [" ".join(__UpperCamelCase ) for m in tokenizer.bpe_ranks.keys()]
__lowerCAmelCase = tokenizer.get_vocab()
return cls(__UpperCamelCase , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
@classmethod
def __UpperCAmelCase ( cls , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )-> Any:
__lowerCAmelCase = GPTaTokenizer.from_pretrained(__UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
return cls.from_tokenizer(__UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
@classmethod
def __UpperCAmelCase ( cls , __UpperCamelCase )-> Optional[Any]:
return cls(**__UpperCamelCase )
def __UpperCAmelCase ( self )-> Optional[int]:
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def __UpperCAmelCase ( self , __UpperCamelCase , __UpperCamelCase = None )-> Optional[int]:
__lowerCAmelCase = self.tf_tokenizer(__UpperCamelCase )
__lowerCAmelCase = tf.ones_like(__UpperCamelCase )
if self.pad_token_id is not None:
# pad the tokens up to max length
__lowerCAmelCase = max_length if max_length is not None else self.max_length
if max_length is not None:
__lowerCAmelCase , __lowerCAmelCase = pad_model_inputs(
__UpperCamelCase , max_seq_length=__UpperCamelCase , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 367
|
from math import pow, sqrt
def __lowerCAmelCase ( *__snake_case ):
__lowerCAmelCase = len(__snake_case ) > 0 and all(value > 0.0 for value in values )
return result
def __lowerCAmelCase ( __snake_case , __snake_case ):
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__snake_case , __snake_case )
else ValueError("Input Error: Molar mass values must greater than 0." )
)
def __lowerCAmelCase ( __snake_case , __snake_case , __snake_case ):
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__snake_case , __snake_case , __snake_case )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
)
def __lowerCAmelCase ( __snake_case , __snake_case , __snake_case ):
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__snake_case , __snake_case , __snake_case )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
)
def __lowerCAmelCase ( __snake_case , __snake_case , __snake_case ):
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(__snake_case , __snake_case , __snake_case )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
)
def __lowerCAmelCase ( __snake_case , __snake_case , __snake_case ):
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(__snake_case , __snake_case , __snake_case )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
)
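# Worked example of Graham's law itself (a standalone sketch, since the helper
# names above are obfuscated): the effusion-rate ratio is
# rate_1 / rate_2 = sqrt(M_2 / M_1). For N2 (28.014 g/mol) against CO2
# (44.009 g/mol) this gives sqrt(44.009 / 28.014) ~= 1.2534, i.e. nitrogen
# effuses about 1.25 times faster than carbon dioxide.
if __name__ == "__main__":
    print(round(sqrt(44.009 / 28.014), 6))  # ~1.2534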
| 367
| 1
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""",
"""self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""",
"""self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""",
"""self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""",
"""self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""",
"""self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""",
"""self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""",
"""self_attn.rotary_emb""": """encoder.embed_positions""",
"""self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""",
"""conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""",
"""conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""",
"""conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""",
"""conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""",
"""conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""",
"""ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""",
"""ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""",
"""ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""",
"""ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""",
"""ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""",
"""ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
_snake_case = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def _A ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
for attribute in key.split("." ):
lowercase__ = getattr(__magic_name__ , __magic_name__ )
if weight_type is not None:
lowercase__ = getattr(__magic_name__ , __magic_name__ ).shape
else:
lowercase__ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
lowercase__ = value
elif weight_type == "weight_g":
lowercase__ = value
elif weight_type == "weight_v":
lowercase__ = value
elif weight_type == "bias":
lowercase__ = value
elif weight_type == "running_mean":
lowercase__ = value
elif weight_type == "running_var":
lowercase__ = value
elif weight_type == "num_batches_tracked":
lowercase__ = value
elif weight_type == "inv_freq":
lowercase__ = value
else:
lowercase__ = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def _A ( __magic_name__ , __magic_name__ , __magic_name__ ):
lowercase__ = []
lowercase__ = fairseq_model.state_dict()
lowercase__ = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
lowercase__ = False
if "conv_layers" in name:
load_conv_layer(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , hf_model.config.feat_extract_norm == "group" , )
lowercase__ = True
else:
for key, mapped_key in MAPPING.items():
lowercase__ = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
lowercase__ = True
if "*" in mapped_key:
lowercase__ = name.split(key )[0].split("." )[-2]
lowercase__ = mapped_key.replace("*" , __magic_name__ )
if "pos_bias_u" in name:
lowercase__ = None
elif "pos_bias_v" in name:
lowercase__ = None
elif "weight_g" in name:
lowercase__ = "weight_g"
elif "weight_v" in name:
lowercase__ = "weight_v"
elif "bias" in name:
lowercase__ = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase__ = "weight"
elif "running_mean" in name:
lowercase__ = "running_mean"
elif "inv_freq" in name:
lowercase__ = "inv_freq"
elif "running_var" in name:
lowercase__ = "running_var"
elif "num_batches_tracked" in name:
lowercase__ = "num_batches_tracked"
else:
lowercase__ = None
set_recursively(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
continue
if not is_used:
unused_weights.append(__magic_name__ )
logger.warning(f'''Unused weights: {unused_weights}''' )
def _A ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
lowercase__ = full_name.split("conv_layers." )[-1]
lowercase__ = name.split("." )
lowercase__ = int(items[0] )
lowercase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
lowercase__ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
lowercase__ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
lowercase__ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
lowercase__ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__magic_name__ )
@torch.no_grad()
def _A ( __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__=True ):
if config_path is not None:
lowercase__ = WavaVecaConformerConfig.from_pretrained(__magic_name__ , hidden_act="swish" )
else:
lowercase__ = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
lowercase__ = "rotary"
if is_finetuned:
if dict_path:
lowercase__ = Dictionary.load(__magic_name__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowercase__ = target_dict.pad_index
lowercase__ = target_dict.bos_index
lowercase__ = target_dict.eos_index
lowercase__ = len(target_dict.symbols )
lowercase__ = os.path.join(__magic_name__ , "vocab.json" )
if not os.path.isdir(__magic_name__ ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(__magic_name__ ) )
return
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
lowercase__ = target_dict.indices
# fairseq has the <pad> and <s> switched
lowercase__ = 0
lowercase__ = 1
with open(__magic_name__ , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(__magic_name__ , __magic_name__ )
lowercase__ = WavaVecaCTCTokenizer(
__magic_name__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=__magic_name__ , )
lowercase__ = True if config.feat_extract_norm == "layer" else False
lowercase__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__magic_name__ , return_attention_mask=__magic_name__ , )
lowercase__ = WavaVecaProcessor(feature_extractor=__magic_name__ , tokenizer=__magic_name__ )
processor.save_pretrained(__magic_name__ )
lowercase__ = WavaVecaConformerForCTC(__magic_name__ )
else:
lowercase__ = WavaVecaConformerForPreTraining(__magic_name__ )
if is_finetuned:
lowercase__ , lowercase__ , lowercase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
lowercase__ = argparse.Namespace(task="audio_pretraining" )
lowercase__ = fairseq.tasks.setup_task(__magic_name__ )
lowercase__ , lowercase__ , lowercase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__magic_name__ )
lowercase__ = model[0].eval()
recursively_load_weights(__magic_name__ , __magic_name__ , not is_finetuned )
hf_wavavec.save_pretrained(__magic_name__ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
_snake_case = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
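# Example invocation (a sketch; the script name and paths are placeholders;
# pass --not_finetuned for a pretraining-only checkpoint):
#
#   python convert_wav2vec2_conformer.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --pytorch_dump_folder_path /path/to/output \
#       --config_path /path/to/config.json \
#       --not_finetuned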
| 714
|
_snake_case = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
_snake_case = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def _A ( __magic_name__ , __magic_name__ , __magic_name__ ):
lowercase__ = True
lowercase__ = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(__magic_name__ , __magic_name__ , __magic_name__ )
order.append(__magic_name__ )
return order
def _A ( __magic_name__ , __magic_name__ , __magic_name__ ):
lowercase__ = True
lowercase__ = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(__magic_name__ , __magic_name__ , __magic_name__ )
return component
def _A ( __magic_name__ ):
lowercase__ = len(__magic_name__ ) * [False]
lowercase__ = {vert: [] for vert in range(len(__magic_name__ ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(vert )
lowercase__ = []
for i, was_visited in enumerate(__magic_name__ ):
if not was_visited:
order += topology_sort(__magic_name__ , __magic_name__ , __magic_name__ )
lowercase__ = []
lowercase__ = len(__magic_name__ ) * [False]
for i in range(len(__magic_name__ ) ):
lowercase__ = order[len(__magic_name__ ) - i - 1]
if not visited[vert]:
lowercase__ = find_components(__magic_name__ , __magic_name__ , __magic_name__ )
components_list.append(__magic_name__ )
return components_list
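# Expected result (a sketch): for the first test graph above the strongly
# connected components are [0, 1, 2] (the cycle 0 -> 2 -> 1 -> 0), [3] and
# [4]; for the second graph they are [0, 1, 2] and [3, 4, 5].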
| 611
| 0
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
SCREAMING_SNAKE_CASE_: int ='Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'
def lowerCAmelCase_ ( ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = _ask_options(
"In which compute environment are you running?" , ["This machine", "AWS (Amazon SageMaker)"] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
UpperCAmelCase_ = get_sagemaker_input()
else:
UpperCAmelCase_ = get_cluster_input()
return config
def lowerCAmelCase_ ( snake_case_ : Union[str, Any]=None ) -> Optional[int]:
'''simple docstring'''
if subparsers is not None:
UpperCAmelCase_ = subparsers.add_parser("config" , description=snake_case_ )
else:
UpperCAmelCase_ = argparse.ArgumentParser("Accelerate config command" , description=snake_case_ )
parser.add_argument(
"--config_file" , default=snake_case_ , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=snake_case_ )
return parser
def lowerCAmelCase_ ( snake_case_ : Optional[int] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = get_user_input()
if args.config_file is not None:
UpperCAmelCase_ = args.config_file
else:
if not os.path.isdir(snake_case_ ):
os.makedirs(snake_case_ )
UpperCAmelCase_ = default_yaml_config_file
if config_file.endswith(".json" ):
config.to_json_file(snake_case_ )
else:
config.to_yaml_file(snake_case_ )
print(f"""accelerate configuration saved at {config_file}""" )
def lowerCAmelCase_ ( ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = config_command_parser()
UpperCAmelCase_ = parser.parse_args()
config_command(snake_case_ )
if __name__ == "__main__":
main()
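# Typical command-line usage (the explicit --config_file form writes to a
# chosen location instead of the default cache path):
#
#   accelerate config
#   accelerate config --config_file ./my_config.yaml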
| 78
|
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class lowerCamelCase ( __lowerCamelCase ):
def __init__( self :int , lowercase :Optional[Any] , lowercase :Optional[int]=1_3 , lowercase :Any=7 , lowercase :Tuple=True , lowercase :Optional[int]=True , lowercase :Any=False , lowercase :Any=True , lowercase :Dict=9_9 , lowercase :Dict=3_2 , lowercase :Any=5 , lowercase :Optional[Any]=4 , lowercase :List[str]=6_4 , lowercase :Optional[int]="gelu" , lowercase :int=0.1 , lowercase :str=0.1 , lowercase :List[str]=5_1_2 , lowercase :int=1_6 , lowercase :Any=2 , lowercase :Union[str, Any]=0.02 , lowercase :Optional[int]=3 , lowercase :Optional[Any]=4 , lowercase :Tuple=None , lowercase :int=2 , lowercase :Tuple=2 , lowercase :List[Any]=2 , lowercase :Optional[int]=2 , lowercase :Tuple=4 , lowercase :int=1 , ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
SCREAMING_SNAKE_CASE = q_groups
SCREAMING_SNAKE_CASE = k_groups
SCREAMING_SNAKE_CASE = v_groups
SCREAMING_SNAKE_CASE = post_attention_groups
SCREAMING_SNAKE_CASE = intermediate_groups
SCREAMING_SNAKE_CASE = output_groups
def snake_case__ ( self :str ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self :str ) -> Dict:
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def snake_case__ ( self :Optional[Any] , lowercase :Optional[Any] , lowercase :int , lowercase :Any , lowercase :List[str] , lowercase :Optional[Any] , lowercase :List[str] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE = SqueezeBertModel(config=lowercase )
model.to(lowercase )
model.eval()
SCREAMING_SNAKE_CASE = model(lowercase , lowercase )
SCREAMING_SNAKE_CASE = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self :Dict , lowercase :Dict , lowercase :List[Any] , lowercase :str , lowercase :Union[str, Any] , lowercase :Dict , lowercase :List[Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE = SqueezeBertForMaskedLM(config=lowercase )
model.to(lowercase )
model.eval()
SCREAMING_SNAKE_CASE = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self :List[str] , lowercase :Optional[Any] , lowercase :Optional[int] , lowercase :str , lowercase :int , lowercase :Optional[Any] , lowercase :int ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = SqueezeBertForQuestionAnswering(config=lowercase )
model.to(lowercase )
model.eval()
SCREAMING_SNAKE_CASE = model(
lowercase , attention_mask=lowercase , start_positions=lowercase , end_positions=lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case__ ( self :Any , lowercase :Optional[Any] , lowercase :List[str] , lowercase :int , lowercase :Any , lowercase :Optional[int] , lowercase :List[str] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = SqueezeBertForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
SCREAMING_SNAKE_CASE = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self :str , lowercase :List[Any] , lowercase :List[str] , lowercase :Optional[int] , lowercase :Tuple , lowercase :Tuple , lowercase :str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = SqueezeBertForTokenClassification(config=lowercase )
model.to(lowercase )
model.eval()
SCREAMING_SNAKE_CASE = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self :int , lowercase :List[str] , lowercase :List[Any] , lowercase :Tuple , lowercase :str , lowercase :Optional[Any] , lowercase :Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_choices
SCREAMING_SNAKE_CASE = SqueezeBertForMultipleChoice(config=lowercase )
model.to(lowercase )
model.eval()
SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = model(
lowercase , attention_mask=lowercase , labels=lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case__ ( self :List[str] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = config_and_inputs
SCREAMING_SNAKE_CASE = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
UpperCamelCase_ : int = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
UpperCamelCase_ : int = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase_ : Tuple = False
UpperCamelCase_ : int = True
UpperCamelCase_ : List[Any] = False
def snake_case__ ( self :Tuple ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE = SqueezeBertModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=lowercase , dim=3_7 )
def snake_case__ ( self :Optional[Any] ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case__ ( self :Union[str, Any] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*lowercase )
def snake_case__ ( self :Any ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*lowercase )
def snake_case__ ( self :List[str] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*lowercase )
def snake_case__ ( self :Any ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*lowercase )
def snake_case__ ( self :Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*lowercase )
def snake_case__ ( self :Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*lowercase )
@slow
def snake_case__ ( self :Dict ) -> str:
"""simple docstring"""
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = SqueezeBertModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
def snake_case__ ( self :Any ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE = SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''' )
SCREAMING_SNAKE_CASE = torch.tensor([[1, 2_9_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 1_3, 1_5_8_8, 2]] )
SCREAMING_SNAKE_CASE = model(lowercase )[0]
SCREAMING_SNAKE_CASE = torch.Size((1, 3) )
self.assertEqual(output.shape , lowercase )
SCREAMING_SNAKE_CASE = torch.tensor([[0.64_01, -0.03_49, -0.60_41]] )
self.assertTrue(torch.allclose(lowercase , lowercase , atol=1e-4 ) )
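# --- Hedged usage sketch (not part of the original test file) ---
# A minimal, self-contained version of the integration check above: classify a
# premise/hypothesis pair with the public MNLI checkpoint named in the test.
# The checkpoint id comes from the test; the example sentences are illustrative.
if __name__ == "__main__":
    import torch
    from transformers import SqueezeBertForSequenceClassification, SqueezeBertTokenizer

    tokenizer = SqueezeBertTokenizer.from_pretrained("squeezebert/squeezebert-mnli")
    model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli").eval()
    inputs = tokenizer("A soccer game is underway.", "A sport is being played.", return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # shape (1, 3), matching the assertion in the test above
    print(logits.argmax(-1).item())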
| 201
| 0
|
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __lowercase ( a__ ):
_lowerCAmelCase = (UnCLIPScheduler,)
def __magic_name__ ( self : Tuple , **lowercase__ : Optional[Any] ):
a_ = {
'''num_train_timesteps''': 1_0_0_0,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**lowercase__ )
return config
def __magic_name__ ( self : int ):
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowercase__ )
def __magic_name__ ( self : Union[str, Any] ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=lowercase__ )
def __magic_name__ ( self : Optional[Any] ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowercase__ )
def __magic_name__ ( self : int ):
for clip_sample_range in [1, 5, 1_0, 2_0]:
self.check_over_configs(clip_sample_range=lowercase__ )
def __magic_name__ ( self : str ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=lowercase__ )
def __magic_name__ ( self : Dict ):
for time_step in [0, 5_0_0, 9_9_9]:
for prev_timestep in [None, 5, 1_0_0, 2_5_0, 5_0_0, 7_5_0]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=lowercase__ , prev_timestep=lowercase__ )
def __magic_name__ ( self : Union[str, Any] ):
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(variance_type='''fixed_small_log''' )
a_ = scheduler_class(**lowercase__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.00_00e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.054_9625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.999_4987 ) ) < 1e-5
def __magic_name__ ( self : List[str] ):
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(variance_type='''learned_range''' )
a_ = scheduler_class(**lowercase__ )
a_ = 0.5
assert scheduler._get_variance(1 , predicted_variance=lowercase__ ) - -10.171_2790 < 1e-5
assert scheduler._get_variance(4_8_7 , predicted_variance=lowercase__ ) - -5.799_8052 < 1e-5
assert scheduler._get_variance(9_9_9 , predicted_variance=lowercase__ ) - -0.001_0011 < 1e-5
def __magic_name__ ( self : List[Any] ):
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config()
a_ = scheduler_class(**lowercase__ )
a_ = scheduler.timesteps
a_ = self.dummy_model()
a_ = self.dummy_sample_deter
a_ = torch.manual_seed(0 )
for i, t in enumerate(lowercase__ ):
# 1. predict noise residual
a_ = model(lowercase__ , lowercase__ )
# 2. predict previous mean of sample x_t-1
a_ = scheduler.step(lowercase__ , lowercase__ , lowercase__ , generator=lowercase__ ).prev_sample
a_ = pred_prev_sample
a_ = torch.sum(torch.abs(lowercase__ ) )
a_ = torch.mean(torch.abs(lowercase__ ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1e-2
assert abs(result_mean.item() - 0.328_4743 ) < 1e-3
def __magic_name__ ( self : str ):
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config()
a_ = scheduler_class(**lowercase__ )
scheduler.set_timesteps(2_5 )
a_ = scheduler.timesteps
a_ = self.dummy_model()
a_ = self.dummy_sample_deter
a_ = torch.manual_seed(0 )
for i, t in enumerate(lowercase__ ):
# 1. predict noise residual
a_ = model(lowercase__ , lowercase__ )
if i + 1 == timesteps.shape[0]:
a_ = None
else:
a_ = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
a_ = scheduler.step(
lowercase__ , lowercase__ , lowercase__ , prev_timestep=lowercase__ , generator=lowercase__ ).prev_sample
a_ = pred_prev_sample
a_ = torch.sum(torch.abs(lowercase__ ) )
a_ = torch.mean(torch.abs(lowercase__ ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1e-2
assert abs(result_mean.item() - 0.336_2038 ) < 1e-3
def __magic_name__ ( self : Tuple ):
pass
def __magic_name__ ( self : Any ):
pass
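# --- Hedged sketch (illustrative; not part of the test suite) ---
# Mirrors the full-loop test above, with random noise standing in for the UNet
# so the snippet runs without any model weights. Shapes are arbitrary.
if __name__ == "__main__":
    import torch
    from diffusers import UnCLIPScheduler

    scheduler = UnCLIPScheduler(num_train_timesteps=1000, variance_type="fixed_small_log")
    generator = torch.manual_seed(0)
    sample = torch.randn(1, 3, 32, 32, generator=generator)
    for t in scheduler.timesteps[:3]:  # a few steps are enough to illustrate
        residual = torch.randn(1, 3, 32, 32, generator=generator)  # stand-in for model(sample, t)
        sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
    print(float(sample.abs().mean()))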
| 143
|
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class __lowercase :
def __init__( self : int , lowercase__ : Optional[Any] , lowercase__ : Union[str, Any]=1_3 , lowercase__ : int=7 , lowercase__ : Dict=True , lowercase__ : List[Any]=True , lowercase__ : int=True , lowercase__ : Optional[Any]=True , lowercase__ : Union[str, Any]=9_9 , lowercase__ : Any=6_4 , lowercase__ : int=3_2 , lowercase__ : str=5 , lowercase__ : List[str]=4 , lowercase__ : str=3_7 , lowercase__ : Tuple="gelu" , lowercase__ : Any=0.1 , lowercase__ : Any=0.1 , lowercase__ : Dict=5_1_2 , lowercase__ : List[Any]=1_6 , lowercase__ : List[str]=2 , lowercase__ : Dict=0.02 , lowercase__ : List[Any]=3 , lowercase__ : List[Any]=4 , lowercase__ : Optional[int]=None , ):
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_mask
a_ = use_token_type_ids
a_ = use_labels
a_ = vocab_size
a_ = hidden_size
a_ = embedding_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = num_choices
a_ = scope
def __magic_name__ ( self : List[Any] ):
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a_ = None
if self.use_input_mask:
a_ = random_attention_mask([self.batch_size, self.seq_length] )
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a_ = None
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a_ = ids_tensor([self.batch_size] , self.num_choices )
a_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __magic_name__ ( self : Optional[Any] ):
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase__ , initializer_range=self.initializer_range , )
def __magic_name__ ( self : List[str] , lowercase__ : List[Any] , lowercase__ : Dict , lowercase__ : Union[str, Any] , lowercase__ : Optional[int] , lowercase__ : Union[str, Any] , lowercase__ : Any , lowercase__ : str ):
a_ = MegatronBertModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
a_ = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ )
a_ = model(lowercase__ , token_type_ids=lowercase__ )
a_ = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __magic_name__ ( self : Dict , lowercase__ : Union[str, Any] , lowercase__ : Union[str, Any] , lowercase__ : Dict , lowercase__ : Any , lowercase__ : Optional[int] , lowercase__ : Dict , lowercase__ : int ):
a_ = MegatronBertForMaskedLM(config=lowercase__ )
model.to(lowercase__ )
model.eval()
a_ = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self : Union[str, Any] , lowercase__ : Dict , lowercase__ : Dict , lowercase__ : Tuple , lowercase__ : Dict , lowercase__ : Dict , lowercase__ : Any , lowercase__ : List[str] ):
a_ = MegatronBertForCausalLM(config=lowercase__ )
model.to(lowercase__ )
model.eval()
a_ = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self : Union[str, Any] , lowercase__ : str , lowercase__ : int , lowercase__ : str , lowercase__ : int , lowercase__ : Optional[Any] , lowercase__ : Optional[Any] , lowercase__ : List[str] ):
a_ = MegatronBertForNextSentencePrediction(config=lowercase__ )
model.to(lowercase__ )
model.eval()
a_ = model(
lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __magic_name__ ( self : Tuple , lowercase__ : str , lowercase__ : List[str] , lowercase__ : Tuple , lowercase__ : int , lowercase__ : Any , lowercase__ : Optional[int] , lowercase__ : int ):
a_ = MegatronBertForPreTraining(config=lowercase__ )
model.to(lowercase__ )
model.eval()
a_ = model(
lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ , next_sentence_label=lowercase__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __magic_name__ ( self : int , lowercase__ : Any , lowercase__ : Optional[int] , lowercase__ : int , lowercase__ : Tuple , lowercase__ : Tuple , lowercase__ : int , lowercase__ : List[Any] ):
a_ = MegatronBertForQuestionAnswering(config=lowercase__ )
model.to(lowercase__ )
model.eval()
a_ = model(
lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , start_positions=lowercase__ , end_positions=lowercase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__ ( self : str , lowercase__ : Dict , lowercase__ : List[Any] , lowercase__ : int , lowercase__ : str , lowercase__ : Tuple , lowercase__ : str , lowercase__ : List[str] ):
a_ = self.num_labels
a_ = MegatronBertForSequenceClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
a_ = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self : List[str] , lowercase__ : Dict , lowercase__ : List[Any] , lowercase__ : Optional[int] , lowercase__ : int , lowercase__ : Union[str, Any] , lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ):
a_ = self.num_labels
a_ = MegatronBertForTokenClassification(config=lowercase__ )
model.to(lowercase__ )
model.eval()
a_ = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self : str , lowercase__ : Tuple , lowercase__ : Any , lowercase__ : List[Any] , lowercase__ : Any , lowercase__ : List[Any] , lowercase__ : List[Any] , lowercase__ : Union[str, Any] ):
a_ = self.num_choices
a_ = MegatronBertForMultipleChoice(config=lowercase__ )
model.to(lowercase__ )
model.eval()
a_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a_ = model(
lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __magic_name__ ( self : List[Any] ):
a_ = self.prepare_config_and_inputs()
        ((a_) , (a_) , (a_) , (a_) , (a_) , (a_) , (a_)) = config_and_inputs
a_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __lowercase ( a__ , a__ , unittest.TestCase ):
_lowerCAmelCase = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
_lowerCAmelCase = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCAmelCase = True
# test_resize_embeddings = False
_lowerCAmelCase = False
def __magic_name__ ( self : List[Any] , lowercase__ : Optional[int] , lowercase__ : Tuple , lowercase__ : Any=False ):
a_ = super()._prepare_for_class(lowercase__ , lowercase__ , return_labels=lowercase__ )
if return_labels:
if model_class in get_values(lowercase__ ):
a_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowercase__ )
a_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase__ )
return inputs_dict
def __magic_name__ ( self : Any ):
a_ = MegatronBertModelTester(self )
a_ = ConfigTester(self , config_class=lowercase__ , hidden_size=3_7 )
def __magic_name__ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def __magic_name__ ( self : Dict ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*lowercase__ )
def __magic_name__ ( self : List[str] ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*lowercase__ )
def __magic_name__ ( self : Optional[int] ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*lowercase__ )
def __magic_name__ ( self : Union[str, Any] ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*lowercase__ )
def __magic_name__ ( self : Optional[int] ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*lowercase__ )
def __magic_name__ ( self : Union[str, Any] ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*lowercase__ )
def __magic_name__ ( self : Tuple ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*lowercase__ )
def __magic_name__ ( self : int ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*lowercase__ )
def UpperCAmelCase__ ( _A ):
"""simple docstring"""
return torch.tensor(
_A , dtype=torch.long , device=_A , )
UpperCamelCase__ = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowercase ( unittest.TestCase ):
@slow
@unittest.skip('''Model is not available.''' )
def __magic_name__ ( self : Union[str, Any] ):
a_ = '''nvidia/megatron-bert-uncased-345m'''
if "MYDIR" in os.environ:
a_ = os.path.join(os.environ['''MYDIR'''] , lowercase__ )
a_ = MegatronBertModel.from_pretrained(lowercase__ )
model.to(lowercase__ )
model.half()
a_ = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] )
with torch.no_grad():
a_ = model(lowercase__ )[0]
a_ = torch.Size((1, 9, 1_0_2_4) )
self.assertEqual(output.shape , lowercase__ )
a_ = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
for ii in range(3 ):
for jj in range(3 ):
a_ = output[0, ii, jj]
a_ = expected[3 * ii + jj]
a_ = '''ii={} jj={} a={} b={}'''.format(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
self.assertTrue(math.isclose(lowercase__ , lowercase__ , rel_tol=lowercase__ , abs_tol=lowercase__ ) , msg=lowercase__ )
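# --- Hedged sketch (illustrative): a randomly initialized MegatronBertModel ---
# The public checkpoint above may be unavailable (see the skip decorator), so
# this forward pass uses a tiny random-weight config instead; sizes are arbitrary.
if __name__ == "__main__":
    import torch
    from transformers import MegatronBertConfig, MegatronBertModel

    config = MegatronBertConfig(
        vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37
    )
    model = MegatronBertModel(config).eval()
    input_ids = torch.randint(0, config.vocab_size, (1, 9))
    with torch.no_grad():
        outputs = model(input_ids)
    print(outputs.last_hidden_state.shape)  # torch.Size([1, 9, 32])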
| 143
| 1
|
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def _lowerCamelCase( __snake_case , __snake_case , __snake_case ) -> Tuple:
__snake_case = 1.5
__snake_case = int(factor * num_class_images )
__snake_case = ClipClient(
url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=__snake_case , aesthetic_weight=0.1 )
os.makedirs(f"""{class_data_dir}/images""" , exist_ok=__snake_case )
if len(list(Path(f"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
return
while True:
__snake_case = client.query(text=__snake_case )
if len(__snake_case ) >= factor * num_class_images or num_images > 1e4:
break
else:
__snake_case = int(factor * num_images )
__snake_case = ClipClient(
url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=__snake_case , aesthetic_weight=0.1 , )
__snake_case = 0
__snake_case = 0
__snake_case = tqdm(desc="downloading real regularization images" , total=__snake_case )
with open(f"""{class_data_dir}/caption.txt""" , "w" ) as fa, open(f"""{class_data_dir}/urls.txt""" , "w" ) as fa, open(
f"""{class_data_dir}/images.txt""" , "w" ) as fa:
while total < num_class_images:
__snake_case = class_images[count]
count += 1
try:
__snake_case = requests.get(images["url"] )
if img.status_code == 200:
__snake_case = Image.open(BytesIO(img.content ) )
with open(f"""{class_data_dir}/images/{total}.jpg""" , "wb" ) as f:
f.write(img.content )
fa.write(images["caption"] + "\n" )
fa.write(images["url"] + "\n" )
fa.write(f"""{class_data_dir}/images/{total}.jpg""" + "\n" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def _lowerCamelCase( ) -> Tuple:
__snake_case = argparse.ArgumentParser("" , add_help=__snake_case )
parser.add_argument("--class_prompt" , help="text prompt to retrieve images" , required=__snake_case , type=__snake_case )
parser.add_argument("--class_data_dir" , help="path to save images" , required=__snake_case , type=__snake_case )
parser.add_argument("--num_class_images" , help="number of images to download" , default=200 , type=__snake_case )
return parser.parse_args()
if __name__ == "__main__":
lowerCamelCase__ = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
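# --- Hedged usage note (illustrative; the script filename is an assumption) ---
# Typical command-line invocation, assuming the LAION retrieval service used
# above is reachable:
#
#   python retrieve.py \
#       --class_prompt "photo of a dog" \
#       --class_data_dir ./class_images \
#       --num_class_images 200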
| 524
|
def _lowerCamelCase( __snake_case ) -> float:
    if edge <= 0 or not isinstance(__snake_case , (int, float) ):
        raise ValueError("Length must be a positive number." )
return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def _lowerCamelCase( __snake_case ) -> float:
    if edge <= 0 or not isinstance(__snake_case , (int, float) ):
        raise ValueError("Length must be a positive number." )
return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
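# --- Worked example (illustrative): a dodecahedron with edge length 2 ---
# surface area = 3 * sqrt(25 + 10*sqrt(5)) * 2**2 ≈ 82.583
# volume       = (15 + 7*sqrt(5)) / 4    * 2**3 ≈ 61.305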
| 524
| 1
|
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class lowerCAmelCase_ :
"""simple docstring"""
pass
| 567
|
def _a ( __lowercase ) -> str:
"""simple docstring"""
__UpperCamelCase = int(__lowercase )
if decimal in (0, 1): # Exit cases for the recursion
return str(__lowercase )
__UpperCamelCase , __UpperCamelCase = divmod(__lowercase , 2 )
return binary_recursive(__lowercase ) + str(__lowercase )
def _a ( __lowercase ) -> str:
"""simple docstring"""
__UpperCamelCase = str(__lowercase ).strip()
if not number:
raise ValueError('No input value was provided' )
__UpperCamelCase = '-' if number.startswith('-' ) else ''
__UpperCamelCase = number.lstrip('-' )
if not number.isnumeric():
raise ValueError('Input value is not an integer' )
return F"""{negative}0b{binary_recursive(int(__lowercase ) )}"""
if __name__ == "__main__":
from doctest import testmod
testmod()
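# --- Worked example (illustrative): tracing the recursion for 200 ---
# divmod chain: 200->(100,0) 100->(50,0) 50->(25,0) 25->(12,1)
#               12->(6,0)    6->(3,0)   3->(1,1)   1 -> "1"
# Concatenating most-significant-bit first gives "11001000", so the public
# function returns "0b11001000" for "200" and "-0b11001000" for "-200".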
| 567
| 1
|
'''simple docstring'''
def A_( A : int):
    if not isinstance(A , int):
UpperCamelCase = f'''Input value of [number={number}] must be an integer'''
raise TypeError(A)
if number < 0:
return False
UpperCamelCase = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
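# --- Worked example (illustrative): the digit-by-digit check above ---
# 76 * 76 = 5776; the trailing digits match (6 == 6, then 7 == 7), so 76 passes
# (it is an automorphic number). 7 * 7 = 49 fails immediately, since 7 != 9.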
| 3
|
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
a_ : str = [
{'dataset': 'wikipedia', 'config_name': '20220301.de'},
{'dataset': 'wikipedia', 'config_name': '20220301.en'},
{'dataset': 'wikipedia', 'config_name': '20220301.fr'},
{'dataset': 'wikipedia', 'config_name': '20220301.frr'},
{'dataset': 'wikipedia', 'config_name': '20220301.it'},
{'dataset': 'wikipedia', 'config_name': '20220301.simple'},
{'dataset': 'snli', 'config_name': 'plain_text'},
{'dataset': 'eli5', 'config_name': 'LFQA_reddit'},
{'dataset': 'wiki40b', 'config_name': 'en'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'},
{'dataset': 'natural_questions', 'config_name': 'default'},
]
def lowerCamelCase__ (_UpperCAmelCase=True):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=A__ ) )
class _snake_case ( A__ ):
_lowercase : Optional[Any] = None
_lowercase : Optional[Any] = None
def SCREAMING_SNAKE_CASE__ ( self , a , a) -> Optional[Any]:
with TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE = dataset_module_factory(a , cache_dir=a)
SCREAMING_SNAKE_CASE = import_main_class(dataset_module.module_path , dataset=a)
SCREAMING_SNAKE_CASE = builder_cls(
cache_dir=a , config_name=a , hash=dataset_module.hash , )
SCREAMING_SNAKE_CASE = '/'.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=a).replace(os.sep , '/'),
config.DATASET_INFO_FILENAME,
])
SCREAMING_SNAKE_CASE = cached_path(a , cache_dir=a)
self.assertTrue(os.path.exists(a))
@pytest.mark.integration
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('test_hf_gcp') / 'test_wikipedia_simple'
SCREAMING_SNAKE_CASE = dataset_module_factory('wikipedia' , cache_dir=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = import_main_class(dataset_module.module_path)
SCREAMING_SNAKE_CASE = builder_cls(
cache_dir=_UpperCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
SCREAMING_SNAKE_CASE = None
builder_instance.download_and_prepare()
SCREAMING_SNAKE_CASE = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = dataset_module_factory('wikipedia' , cache_dir=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = import_main_class(dataset_module.module_path , dataset=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = builder_cls(
cache_dir=_UpperCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
SCREAMING_SNAKE_CASE = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(_UpperCAmelCase , _UpperCAmelCase)
assert "train" in ds
assert isinstance(ds['train'] , _UpperCAmelCase)
assert next(iter(ds['train']))
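# --- Hedged note (illustrative) ---
# A roughly equivalent high-level call for the streaming test above; depending
# on the installed datasets version this may still require the preprocessed
# mirror on the Hub/GCS:
#   ds = load_dataset("wikipedia", "20220301.frr", split="train", streaming=True)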
| 73
| 0
|
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
__UpperCamelCase : List[str] = get_logger(__name__)
class __UpperCamelCase :
def __init__( self : List[str] , _lowerCAmelCase : Optional[str] = None ) -> Tuple:
"""simple docstring"""
__lowercase = (
os.path.join(_lowerCAmelCase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
__lowercase = Extractor
def _a ( self : str , _lowerCAmelCase : str ) -> str:
"""simple docstring"""
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
__lowercase = os.path.abspath(_lowerCAmelCase )
return os.path.join(self.extract_dir , hash_url_to_filename(_lowerCAmelCase ) )
def _a ( self : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : bool ) -> bool:
"""simple docstring"""
return force_extract or (
not os.path.isfile(_lowerCAmelCase ) and not (os.path.isdir(_lowerCAmelCase ) and os.listdir(_lowerCAmelCase ))
)
def _a ( self : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : bool = False ) -> str:
"""simple docstring"""
__lowercase = self.extractor.infer_extractor_format(_lowerCAmelCase )
if not extractor_format:
return input_path
__lowercase = self._get_output_path(_lowerCAmelCase )
if self._do_extract(_lowerCAmelCase , _lowerCAmelCase ):
self.extractor.extract(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return output_path
class __UpperCamelCase ( _lowerCAmelCase ):
@classmethod
@abstractmethod
def _a ( cls : List[str] , _lowerCAmelCase : Union[Path, str] , **_lowerCAmelCase : str ) -> bool:
"""simple docstring"""
...
@staticmethod
@abstractmethod
def _a ( _lowerCAmelCase : Union[Path, str] , _lowerCAmelCase : Union[Path, str] ) -> None:
"""simple docstring"""
...
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ):
__snake_case :List[bytes] = []
@staticmethod
def _a ( _lowerCAmelCase : Union[Path, str] , _lowerCAmelCase : int ) -> List[Any]:
"""simple docstring"""
with open(_lowerCAmelCase , """rb""" ) as f:
return f.read(_lowerCAmelCase )
@classmethod
def _a ( cls : Any , _lowerCAmelCase : Union[Path, str] , _lowerCAmelCase : bytes = b"" ) -> bool:
"""simple docstring"""
if not magic_number:
            __lowercase = max(len(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
try:
__lowercase = cls.read_magic_number(_lowerCAmelCase , _lowerCAmelCase )
except OSError:
return False
return any(magic_number.startswith(_lowerCAmelCase ) for cls_magic_number in cls.magic_numbers )
class __UpperCamelCase ( _lowerCAmelCase ):
@classmethod
def _a ( cls : Dict , _lowerCAmelCase : Union[Path, str] , **_lowerCAmelCase : str ) -> bool:
"""simple docstring"""
return tarfile.is_tarfile(_lowerCAmelCase )
@staticmethod
def _a ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] ) -> List[str]:
"""simple docstring"""
def resolved(_lowerCAmelCase : str ) -> str:
return os.path.realpath(os.path.abspath(_lowerCAmelCase ) )
def badpath(_lowerCAmelCase : str , _lowerCAmelCase : str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) ).startswith(_lowerCAmelCase )
def badlink(_lowerCAmelCase : Any , _lowerCAmelCase : str ) -> bool:
# Links are interpreted relative to the directory containing the link
__lowercase = resolved(os.path.join(_lowerCAmelCase , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=_lowerCAmelCase )
__lowercase = resolved(_lowerCAmelCase )
for finfo in members:
if badpath(finfo.name , _lowerCAmelCase ):
logger.error(F'Extraction of {finfo.name} is blocked (illegal path)' )
elif finfo.issym() and badlink(_lowerCAmelCase , _lowerCAmelCase ):
logger.error(F'Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}' )
elif finfo.islnk() and badlink(_lowerCAmelCase , _lowerCAmelCase ):
logger.error(F'Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}' )
else:
yield finfo
@staticmethod
def _a ( _lowerCAmelCase : Union[Path, str] , _lowerCAmelCase : Union[Path, str] ) -> None:
"""simple docstring"""
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
__lowercase = tarfile.open(_lowerCAmelCase )
tar_file.extractall(_lowerCAmelCase , members=TarExtractor.safemembers(_lowerCAmelCase , _lowerCAmelCase ) )
tar_file.close()
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :List[Any] = [B'\x1F\x8B']
@staticmethod
def _a ( _lowerCAmelCase : Union[Path, str] , _lowerCAmelCase : Union[Path, str] ) -> None:
"""simple docstring"""
with gzip.open(_lowerCAmelCase , """rb""" ) as gzip_file:
with open(_lowerCAmelCase , """wb""" ) as extracted_file:
shutil.copyfileobj(_lowerCAmelCase , _lowerCAmelCase )
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Dict = [
B'PK\x03\x04',
B'PK\x05\x06', # empty archive
B'PK\x07\x08', # spanned archive
]
@classmethod
def _a ( cls : Optional[Any] , _lowerCAmelCase : Union[Path, str] , _lowerCAmelCase : bytes = b"" ) -> bool:
"""simple docstring"""
if super().is_extractable(_lowerCAmelCase , magic_number=_lowerCAmelCase ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(_lowerCAmelCase , """rb""" ) as fp:
__lowercase = _EndRecData(_lowerCAmelCase )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
__lowercase = fp.read(_lowerCAmelCase ) # CD is where we expect it to be
if len(_lowerCAmelCase ) == sizeCentralDir:
__lowercase = struct.unpack(_lowerCAmelCase , _lowerCAmelCase ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def _a ( _lowerCAmelCase : Union[Path, str] , _lowerCAmelCase : Union[Path, str] ) -> None:
"""simple docstring"""
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
with zipfile.ZipFile(_lowerCAmelCase , """r""" ) as zip_file:
zip_file.extractall(_lowerCAmelCase )
zip_file.close()
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Tuple = [B'\xFD\x37\x7A\x58\x5A\x00']
@staticmethod
def _a ( _lowerCAmelCase : Union[Path, str] , _lowerCAmelCase : Union[Path, str] ) -> None:
"""simple docstring"""
with lzma.open(_lowerCAmelCase ) as compressed_file:
with open(_lowerCAmelCase , """wb""" ) as extracted_file:
shutil.copyfileobj(_lowerCAmelCase , _lowerCAmelCase )
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Union[str, Any] = [B'Rar!\x1a\x07\x00', B'Rar!\x1a\x07\x01\x00'] # RAR_ID # RAR5_ID
@staticmethod
def _a ( _lowerCAmelCase : Union[Path, str] , _lowerCAmelCase : Union[Path, str] ) -> None:
"""simple docstring"""
if not config.RARFILE_AVAILABLE:
raise ImportError("""Please pip install rarfile""" )
import rarfile
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
__lowercase = rarfile.RarFile(_lowerCAmelCase )
rf.extractall(_lowerCAmelCase )
rf.close()
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Dict = [B'\x28\xb5\x2F\xFD']
@staticmethod
def _a ( _lowerCAmelCase : Union[Path, str] , _lowerCAmelCase : Union[Path, str] ) -> None:
"""simple docstring"""
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("""Please pip install zstandard""" )
import zstandard as zstd
__lowercase = zstd.ZstdDecompressor()
with open(_lowerCAmelCase , """rb""" ) as ifh, open(_lowerCAmelCase , """wb""" ) as ofh:
dctx.copy_stream(_lowerCAmelCase , _lowerCAmelCase )
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :List[Any] = [B'\x42\x5A\x68']
@staticmethod
def _a ( _lowerCAmelCase : Union[Path, str] , _lowerCAmelCase : Union[Path, str] ) -> None:
"""simple docstring"""
        with bz2.open(_lowerCAmelCase , """rb""" ) as compressed_file:
with open(_lowerCAmelCase , """wb""" ) as extracted_file:
shutil.copyfileobj(_lowerCAmelCase , _lowerCAmelCase )
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Any = [B'\x37\x7A\xBC\xAF\x27\x1C']
@staticmethod
def _a ( _lowerCAmelCase : Union[Path, str] , _lowerCAmelCase : Union[Path, str] ) -> None:
"""simple docstring"""
if not config.PY7ZR_AVAILABLE:
raise ImportError("""Please pip install py7zr""" )
        import py7zr
        os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
        with py7zr.SevenZipFile(_lowerCAmelCase , """r""" ) as archive:
archive.extractall(_lowerCAmelCase )
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Tuple = [B'\x04\x22\x4D\x18']
@staticmethod
def _a ( _lowerCAmelCase : Union[Path, str] , _lowerCAmelCase : Union[Path, str] ) -> None:
"""simple docstring"""
if not config.LZ4_AVAILABLE:
raise ImportError("""Please pip install lz4""" )
        import lz4.frame
        with lz4.frame.open(_lowerCAmelCase , """rb""" ) as compressed_file:
with open(_lowerCAmelCase , """wb""" ) as extracted_file:
shutil.copyfileobj(_lowerCAmelCase , _lowerCAmelCase )
class __UpperCamelCase :
    # Put the zip extractor last, because tar and gzip archives can be wrongly detected as zip.
__snake_case :Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def _a ( cls : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
        return max(
            len(extractor_magic_number )
            for extractor in cls.extractors.values()
            if issubclass(extractor , MagicNumberBaseExtractor )
            for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def _a ( _lowerCAmelCase : Union[Path, str] , _lowerCAmelCase : int ) -> Dict:
"""simple docstring"""
try:
return MagicNumberBaseExtractor.read_magic_number(_lowerCAmelCase , magic_number_length=_lowerCAmelCase )
except OSError:
return b""
@classmethod
def _a ( cls : str , _lowerCAmelCase : Union[Path, str] , _lowerCAmelCase : bool = False ) -> bool:
"""simple docstring"""
warnings.warn(
"""Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'infer_extractor_format' instead.""" , category=_lowerCAmelCase , )
__lowercase = cls.infer_extractor_format(_lowerCAmelCase )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def _a ( cls : Optional[int] , _lowerCAmelCase : Union[Path, str] ) -> str: # <Added version="2.4.0"/>
"""simple docstring"""
__lowercase = cls._get_magic_number_max_length()
__lowercase = cls._read_magic_number(_lowerCAmelCase , _lowerCAmelCase )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(_lowerCAmelCase , magic_number=_lowerCAmelCase ):
return extractor_format
@classmethod
def _a ( cls : List[str] , _lowerCAmelCase : Union[Path, str] , _lowerCAmelCase : Union[Path, str] , _lowerCAmelCase : Optional[str] = None , _lowerCAmelCase : Optional[BaseExtractor] = "deprecated" , ) -> None:
"""simple docstring"""
os.makedirs(os.path.dirname(_lowerCAmelCase ) , exist_ok=_lowerCAmelCase )
# Prevent parallel extractions
__lowercase = str(Path(_lowerCAmelCase ).with_suffix(""".lock""" ) )
with FileLock(_lowerCAmelCase ):
shutil.rmtree(_lowerCAmelCase , ignore_errors=_lowerCAmelCase )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(_lowerCAmelCase , _lowerCAmelCase ): # passed as positional arg
warnings.warn(
"""Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'extractor_format' instead.""" , category=_lowerCAmelCase , )
__lowercase = extractor if extractor != """deprecated""" else extractor_format
else:
__lowercase = cls.extractors[extractor_format]
return extractor.extract(_lowerCAmelCase , _lowerCAmelCase )
else:
warnings.warn(
"""Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
"""exception in 3.0.0.""" , category=_lowerCAmelCase , )
for extractor in cls.extractors.values():
if extractor.is_extractable(_lowerCAmelCase ):
return extractor.extract(_lowerCAmelCase , _lowerCAmelCase )
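# --- Hedged usage sketch (comments only) ---
# In the upstream module the final class above is the public `Extractor`. With
# a hypothetical archive path, the intended call pattern is:
#   fmt = Extractor.infer_extractor_format("/tmp/archive.tar.gz")
#   if fmt:
#       Extractor.extract("/tmp/archive.tar.gz", "/tmp/extracted", extractor_format=fmt)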
| 53
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__snake_case :Any = IFInpaintingPipeline
__snake_case :str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
__snake_case :Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__snake_case :str = PipelineTesterMixin.required_optional_params - {'latents'}
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return self._get_dummy_components()
def _a ( self : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict=0 ) -> Any:
"""simple docstring"""
if str(_lowerCAmelCase ).startswith("""mps""" ):
__lowercase = torch.manual_seed(_lowerCAmelCase )
else:
__lowercase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
__lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
__lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
__lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _a ( self : Tuple ) -> Dict:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def _a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def _a ( self : List[str] ) -> Tuple:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1e-1 )
def _a ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _a ( self : str ) -> Optional[int]:
"""simple docstring"""
self._test_save_load_local()
def _a ( self : List[str] ) -> List[Any]:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
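# --- Hedged usage sketch (comments only; the real IF checkpoints are large and gated) ---
# pipe = IFInpaintingPipeline.from_pretrained(
#     "DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16
# )
# result = pipe(
#     prompt="A painting of a squirrel eating a burger",
#     image=image, mask_image=mask_image, num_inference_steps=50,
# ).images[0]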
| 53
| 1
|
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def lowerCamelCase (a_ :Optional[Any]) -> Optional[Any]:
    lowercase , lowercase = image.size
    lowercase , lowercase = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
lowercase :str = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''])
lowercase :List[str] = np.array(a_).astype(np.floataa) / 255.0
lowercase :str = image[None].transpose(0 , 3 , 1 , 2)
lowercase :Dict = torch.from_numpy(a_)
return 2.0 * image - 1.0
class __magic_name__ ( __UpperCAmelCase ):
def __init__( self : Dict , snake_case__ : Optional[Any] , snake_case__ : Any , snake_case__ : Optional[int] , ):
'''simple docstring'''
super().__init__()
self.register_modules(vqvae=_SCREAMING_SNAKE_CASE , unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __call__( self : List[Any] , snake_case__ : Dict = None , snake_case__ : List[Any] = 1 , snake_case__ : Optional[int] = 1_0_0 , snake_case__ : int = 0.0 , snake_case__ : List[str] = None , snake_case__ : str = "pil" , snake_case__ : Optional[int] = True , ):
'''simple docstring'''
if isinstance(_SCREAMING_SNAKE_CASE , PIL.Image.Image ):
lowercase :int = 1
elif isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
lowercase :Dict = image.shape[0]
else:
raise ValueError(f"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_SCREAMING_SNAKE_CASE )}""" )
if isinstance(_SCREAMING_SNAKE_CASE , PIL.Image.Image ):
lowercase :Optional[int] = preprocess(_SCREAMING_SNAKE_CASE )
lowercase :str = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
lowercase :Any = (batch_size, self.unet.config.in_channels // 2, height, width)
lowercase :Any = next(self.unet.parameters() ).dtype
lowercase :Tuple = randn_tensor(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device=self.device , dtype=_SCREAMING_SNAKE_CASE )
lowercase :str = image.to(device=self.device , dtype=_SCREAMING_SNAKE_CASE )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE , device=self.device )
lowercase :Union[str, Any] = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
lowercase :int = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase :Any = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase :Optional[Any] = {}
if accepts_eta:
lowercase :int = eta
for t in self.progress_bar(_SCREAMING_SNAKE_CASE ):
# concat latents and low resolution image in the channel dimension.
lowercase :Union[str, Any] = torch.cat([latents, image] , dim=1 )
lowercase :List[Any] = self.scheduler.scale_model_input(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# predict the noise residual
lowercase :int = self.unet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).sample
# compute the previous noisy sample x_t -> x_t-1
lowercase :str = self.scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
# decode the image latents with the VQVAE
lowercase :str = self.vqvae.decode(_SCREAMING_SNAKE_CASE ).sample
lowercase :Tuple = torch.clamp(_SCREAMING_SNAKE_CASE , -1.0 , 1.0 )
lowercase :Optional[Any] = image / 2 + 0.5
lowercase :Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase :Dict = self.numpy_to_pil(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_SCREAMING_SNAKE_CASE )
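# --- Hedged usage sketch (comments only; the checkpoint id is an assumption) ---
# The pipeline above is upstream's LDMSuperResolutionPipeline; a typical call:
#   pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   upscaled = pipe(low_res_image, num_inference_steps=100, eta=1.0).images[0]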
| 677
|
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _lowerCamelCase ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
@register_to_config
def __init__( self , _SCREAMING_SNAKE_CASE = 768 , )->Union[str, Any]:
'''simple docstring'''
super().__init__()
A_ : Union[str, Any] = nn.Parameter(torch.zeros(1 , _SCREAMING_SNAKE_CASE ) )
A_ : Any = nn.Parameter(torch.ones(1 , _SCREAMING_SNAKE_CASE ) )
def _snake_case ( self , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , )->Tuple:
'''simple docstring'''
A_ : Optional[Any] = nn.Parameter(self.mean.to(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE ) )
A_ : str = nn.Parameter(self.std.to(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE ) )
return self
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->Optional[int]:
'''simple docstring'''
A_ : Tuple = (embeds - self.mean) * 1.0 / self.std
return embeds
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->List[str]:
'''simple docstring'''
A_ : List[str] = (embeds * self.std) + self.mean
return embeds
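# --- Worked note (illustrative): the scale/unscale round trip above ---
# scale:   e' = (e - mean) / std
# unscale: e  = e' * std + mean
# so unscale(scale(e)) recovers e exactly, up to floating-point error.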
| 590
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "dandelin/vilt-b32-finetuned-vqa"
a__ = (
"This is a tool that answers a question about an image. It takes an input named `image` which should be the "
"image containing the information, as well as a `question` which should be the question in English. It "
"returns a text that is the answer to the question."
)
a__ = "image_qa"
a__ = AutoProcessor
a__ = AutoModelForVisualQuestionAnswering
a__ = ["image", "text"]
a__ = ["text"]
def __init__( self , *__snake_case , **__snake_case):
requires_backends(self , ['vision'])
super().__init__(*__snake_case , **__snake_case)
def A__ ( self , __snake_case , __snake_case):
return self.pre_processor(__snake_case , __snake_case , return_tensors='pt')
def A__ ( self , __snake_case):
with torch.no_grad():
return self.model(**__snake_case).logits
def A__ ( self , __snake_case):
_UpperCamelCase : Tuple = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
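# --- Hedged usage sketch (comments only; upstream name: ImageQuestionAnsweringTool) ---
# from PIL import Image
# tool = lowercase()  # the tool class defined above
# answer = tool(image=Image.open("photo.jpg"), question="What is on the table?")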
| 648
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "bert"
def __init__( self , __snake_case=3_05_22 , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=0 , __snake_case="absolute" , __snake_case=True , __snake_case=None , **__snake_case , ):
super().__init__(pad_token_id=__snake_case , **__snake_case)
_UpperCamelCase : int = vocab_size
_UpperCamelCase : Optional[Any] = hidden_size
_UpperCamelCase : Optional[Any] = num_hidden_layers
_UpperCamelCase : List[str] = num_attention_heads
_UpperCamelCase : int = hidden_act
_UpperCamelCase : Optional[Any] = intermediate_size
_UpperCamelCase : Union[str, Any] = hidden_dropout_prob
_UpperCamelCase : Tuple = attention_probs_dropout_prob
_UpperCamelCase : Optional[int] = max_position_embeddings
_UpperCamelCase : str = type_vocab_size
_UpperCamelCase : Optional[Any] = initializer_range
_UpperCamelCase : List[str] = layer_norm_eps
_UpperCamelCase : Any = position_embedding_type
_UpperCamelCase : Any = use_cache
_UpperCamelCase : Any = classifier_dropout
class lowercase ( _lowercase ):
"""simple docstring"""
@property
def A__ ( self):
if self.task == "multiple-choice":
_UpperCamelCase : Union[str, Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_UpperCamelCase : Optional[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
])
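# --- Hedged sketch (illustrative): the config class above is upstream's BertConfig ---
if __name__ == "__main__":
    from transformers import BertConfig

    cfg = BertConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=4)
    print(cfg.vocab_size, cfg.hidden_size)  # 30522 128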
| 648
| 1
|
'''simple docstring'''
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
A : Union[str, Any] = 'tiny-wmt19-en-ru'
# Build
# borrowed from a test
A : str = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
A : List[Any] = dict(zip(vocab, range(len(vocab))))
A : List[Any] = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
with tempfile.TemporaryDirectory() as tmpdirname:
A : int = Path(tmpdirname)
A : Tuple = build_dir / VOCAB_FILES_NAMES['src_vocab_file']
A : List[str] = build_dir / VOCAB_FILES_NAMES['tgt_vocab_file']
A : str = build_dir / VOCAB_FILES_NAMES['merges_file']
with open(src_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, """w""") as fp:
fp.write("""\n""".join(merges))
A : Dict = FSMTTokenizer(
langs=["""en""", """ru"""],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
    config = FSMTConfig(
        langs=["ru", "en"],
        src_vocab_size=1000,
        tgt_vocab_size=1000,
        d_model=4,
        encoder_layers=1,
        decoder_layers=1,
        encoder_ffn_dim=4,
        decoder_ffn_dim=4,
        encoder_attention_heads=1,
        decoder_attention_heads=1,
    )
    tiny_model = FSMTForConditionalGeneration(config)
    print(f"num of params {tiny_model.num_parameters()}")

    # Test
    batch = tokenizer(["Making tiny model"], return_tensors="pt")
    outputs = tiny_model(**batch)
    print("test output:", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
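# Loading the artifact back is a quick sanity check (a sketch; assumes the script
# was run from the directory where `tiny-wmt19-en-ru` was written):
#
#     model = FSMTForConditionalGeneration.from_pretrained(mname_tiny)
#     tokenizer = FSMTTokenizer.from_pretrained(mname_tiny)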
| 349
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'squeezebert/squeezebert-uncased': 512,
'squeezebert/squeezebert-mnli': 512,
'squeezebert/squeezebert-mnli-headless': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]",
        pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True,
        strip_accents=None, **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Re-sync the backend normalizer if the passed-in options differ from the saved state
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
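# Illustrative layout produced by the two helpers above (token ids are made up):
#
#     build_inputs_with_special_tokens([5, 6], [7])      -> [CLS] 5 6 [SEP] 7 [SEP]
#     create_token_type_ids_from_sequences([5, 6], [7])  -> [0, 0, 0, 0, 1, 1]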
| 212
| 0
|
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"


def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    # Original Bort configuration (4 layers, 8 heads, 768 hidden units, 1024 embed size)
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both models output the same tensors")
    else:
        print("❌ Both models do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCamelCase_ = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
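# Example invocation (a sketch; both the script filename and the paths below are hypothetical):
#
#     python convert_bort_checkpoint.py \
#         --bort_checkpoint_path ./bort.params \
#         --pytorch_dump_folder_path ./bort-pytorch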
| 599
|
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''' , [None, '''v2'''] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"""https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path)}"""
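# One resolved case for illustration (derivable from the assert above):
#   hf_hub_url("org-name/dataset-name", "filename with blanks.csv", "v2")
#   -> "https://huggingface.co/datasets/org-name/dataset-name/resolve/v2/filename%20with%20blanks.csv"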
| 599
| 1
|
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self, vocab_size=50267, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096,
        encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024, dropout=0.1,
        attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0,
        scale_embedding=False, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2,
        is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, use_prompt=False,
        prompt_length=100, prompt_mid_dim=800, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
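# Usage sketch (illustrative):
#
#     config = MvpConfig(d_model=512, encoder_layers=6, decoder_layers=6)
#     print(config.num_hidden_layers)  # 6 -- mirrors encoder_layers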
| 64
|
encode_dict = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
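    # Illustrative round trip (h=AABBB, e=AABAA, l=ABABA, o=ABBAB per the table above):
    assert encode("hello") == "AABBBAABAAABABAABABAABBAB"
    assert decode(encode("hello world")) == "hello world"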
| 670
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
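# To exercise just this file (a sketch; the path assumes the standard transformers test layout):
#
#     python -m pytest tests/models/roberta_prelayernorm/test_modeling_flax_roberta_prelayernorm.py -rA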
| 718
|
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_fold_dataloaders(accelerator, dataset, train_idxs, valid_idxs, batch_size=16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    # New Code #
    test_references = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_predictions = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
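# Example launches (a sketch; the script filename is hypothetical):
#
#     python cross_validation.py --num_folds 3
#     accelerate launch cross_validation.py --num_folds 5 --mixed_precision fp16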
| 179
| 0
|
'''simple docstring'''
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
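    # Quick illustration: 5 and 7 are twin primes; 8 is not prime, so -1 is returned.
    print(twin_prime(5))  # 7
    print(twin_prime(8))  # -1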
| 329
|
'''simple docstring'''
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
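if __name__ == "__main__":
    # Worked example: x + 2y = 3 and 2x + y = 3 intersect at (1, 1).
    assert cramers_rule_2x2([1, 2, 3], [2, 1, 3]) == (1.0, 1.0)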
| 71
| 0
|
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
a_ = "http://www.mocksite.com/file1.txt"
a_ = "\"text\": [\"foo\", \"foo\"]"
a_ = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class snake_case :
__UpperCamelCase = 200
__UpperCamelCase = {'Content-Length': '100'}
__UpperCamelCase = {}
def a_ ( self : Dict , **a__ : Tuple ) -> List[str]:
'''simple docstring'''
return [bytes(a__ , "utf-8" )]
def a__ ( *__lowercase , **__lowercase ) -> Any:
return MockResponse()
@pytest.mark.parametrize("urls_type" , [str, list, dict] )
def a__ ( __lowercase , __lowercase , __lowercase ) -> Tuple:
import requests
monkeypatch.setattr(__lowercase , "request" , __lowercase )
_A = URL
if issubclass(__lowercase , __lowercase ):
_A = url
elif issubclass(__lowercase , __lowercase ):
_A = [url]
elif issubclass(__lowercase , __lowercase ):
_A = {"train": url}
_A = "dummy"
_A = "downloads"
_A = tmp_path
_A = DownloadConfig(
cache_dir=os.path.join(__lowercase , __lowercase ) , use_etag=__lowercase , )
_A = DownloadManager(dataset_name=__lowercase , download_config=__lowercase )
_A = dl_manager.download(__lowercase )
_A = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(__lowercase , __lowercase ):
_A = [downloaded_paths]
_A = [urls]
elif isinstance(__lowercase , __lowercase ):
assert "train" in downloaded_paths.keys()
_A = downloaded_paths.values()
_A = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(__lowercase , __lowercase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
_A = Path(__lowercase )
_A = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
_A = downloaded_path.read_text()
assert content == CONTENT
_A = downloaded_path.with_suffix(".json" )
assert metadata_downloaded_path.exists()
_A = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type" , [str, list, dict] )
def a__ ( __lowercase , __lowercase , __lowercase ) -> Optional[int]:
_A = str(__lowercase )
if issubclass(__lowercase , __lowercase ):
_A = filename
elif issubclass(__lowercase , __lowercase ):
_A = [filename]
elif issubclass(__lowercase , __lowercase ):
_A = {"train": filename}
_A = "dummy"
_A = xz_file.parent
_A = "extracted"
_A = DownloadConfig(
cache_dir=__lowercase , use_etag=__lowercase , )
_A = DownloadManager(dataset_name=__lowercase , download_config=__lowercase )
_A = dl_manager.extract(__lowercase )
_A = paths
for extracted_paths in [extracted_paths]:
if isinstance(__lowercase , __lowercase ):
_A = [extracted_paths]
_A = [paths]
elif isinstance(__lowercase , __lowercase ):
assert "train" in extracted_paths.keys()
_A = extracted_paths.values()
_A = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(__lowercase , __lowercase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
_A = Path(__lowercase )
_A = extracted_path.parts
assert parts[-1] == hash_url_to_filename(__lowercase , etag=__lowercase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
_A = extracted_path.read_text()
_A = text_file.read_text()
assert extracted_file_content == expected_file_content
def a__ ( __lowercase , __lowercase ) -> Optional[Any]:
assert path.endswith(".jsonl" )
for num_items, line in enumerate(__lowercase , start=1 ):
_A = json.loads(line.decode("utf-8" ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("archive_jsonl" , ["tar_jsonl_path", "zip_jsonl_path"] )
def a__ ( __lowercase , __lowercase ) -> List[Any]:
_A = request.getfixturevalue(__lowercase )
_A = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(__lowercase ) , start=1 ):
_test_jsonl(__lowercase , __lowercase )
assert num_jsonl == 2
@pytest.mark.parametrize("archive_nested_jsonl" , ["tar_nested_jsonl_path", "zip_nested_jsonl_path"] )
def a__ ( __lowercase , __lowercase ) -> List[Any]:
_A = request.getfixturevalue(__lowercase )
_A = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(__lowercase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(__lowercase ) , start=1 ):
_test_jsonl(__lowercase , __lowercase )
assert num_tar == 1
assert num_jsonl == 2
def a__ ( __lowercase ) -> Dict:
_A = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(__lowercase ) , start=1 ):
assert os.path.basename(__lowercase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
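# These tests rely on fixtures (tmp_path, xz_file, text_file, *_jsonl_path,
# data_dir_with_hidden_files) provided by the datasets test suite's conftest.
# A typical invocation (a sketch; the file path is an assumption):
#
#     python -m pytest tests/test_download_manager.py -rA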
| 621
|
"""simple docstring"""
import numpy as np
def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    # ELU: identity for positive inputs, alpha * (exp(x) - 1) for negative inputs
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))
if __name__ == "__main__":
import doctest
doctest.testmod()
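    # Example: negative inputs are squashed toward -alpha, positives pass through unchanged.
    print(exponential_linear_unit(np.array([-2.0, 0.5, 3.0]), alpha=1.0))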
| 621
| 1
|
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowerCAmelCase_ = '''platform'''
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class FlaxBlenderbotModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
        hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32,
        eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class snake_case_ ( __A , unittest.TestCase , __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : Optional[Any] = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
SCREAMING_SNAKE_CASE : List[str] = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def snake_case__( self : List[str] ) ->Dict:
snake_case_ = FlaxBlenderbotModelTester(self )
def snake_case__( self : int ) ->Any:
snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def snake_case__( self : List[str] ) ->List[Any]:
snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def snake_case__( self : List[Any] ) ->Optional[int]:
snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
snake_case_ = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase )
snake_case_ = model_class(_UpperCamelCase )
@jax.jit
def encode_jitted(_UpperCamelCase : str , _UpperCamelCase : Union[str, Any]=None , **_UpperCamelCase : List[str] ):
return model.encode(input_ids=_UpperCamelCase , attention_mask=_UpperCamelCase )
with self.subTest('''JIT Enabled''' ):
snake_case_ = encode_jitted(**_UpperCamelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
snake_case_ = encode_jitted(**_UpperCamelCase ).to_tuple()
self.assertEqual(len(_UpperCamelCase ) , len(_UpperCamelCase ) )
for jitted_output, output in zip(_UpperCamelCase , _UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
    def test_decode( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                model = model_class(config )
                encoder_outputs = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
                decoder_inputs = {
                    'decoder_input_ids': inputs_dict['decoder_input_ids'],
                    'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
                    'encoder_outputs': encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids , decoder_attention_mask , encoder_outputs ):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )

                with self.subTest('JIT Enabled' ):
                    jitted_outputs = decode_jitted(**decoder_inputs ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        outputs = decode_jitted(**decoder_inputs ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
@slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('facebook/blenderbot-400M-distill' )
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1) ) * model.config.eos_token_id
            outputs = model(input_ids )
            self.assertIsNotNone(outputs )
@unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
@slow
    def test_generation_from_short_input_same_as_parlai_3B( self ):
        generate_kwargs = {'num_beams': 1, 'early_stopping': True, 'min_length': 15, 'max_length': 25}
        tokenizer_kwargs = {'skip_special_tokens': True, 'clean_up_tokenization_spaces': True}
        model = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B' , from_pt=True )
        tokenizer = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B' )
        src_text = ['Sam']
        inputs = tokenizer(src_text , return_tensors='jax' )
        generated_ids = model.generate(**inputs , **generate_kwargs ).sequences
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'
        generated_txt = tokenizer.batch_decode(generated_ids , **tokenizer_kwargs )
assert generated_txt[0].strip() == tgt_text
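# A minimal numpy sketch of the shift semantics asserted by the padding test above:
# each row moves one position right and the decoder start token fills column 0, so
# one trailing pad token drops off (hence n_pad_after == n_pad_before - 1). The
# function name and two-argument signature here are illustrative, not the
# transformers API.
import numpy as np

def shift_tokens_right_sketch(input_ids: np.ndarray, decoder_start_token_id: int) -> np.ndarray:
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]  # drop the last column, shift the rest right
    shifted[:, 0] = decoder_start_token_id  # every decoder sequence starts with this token
    return shifted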
| 39
|
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl , wt , w , n ):
    # greedy fractional knapsack: sort items by value density, take whole items
    # while they fit, then a fraction of the first item that does not.
    r = sorted(zip(vl , wt ) , key=lambda x: x[0] / x[1] , reverse=True )
    vl , wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt ) )
    k = bisect(acc , w )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
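# Worked example (values are illustrative): frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)
# sorts by density (6, 5, 4), takes items 1 and 2 whole (acc = [10, 30], k = 2),
# then 20/30 of item 3: 160 + 20 * 120 / 30 = 240.0.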
if __name__ == "__main__":
import doctest
doctest.testmod()
| 233
| 0
|
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
a__ = TypeVar('KT')
a__ = TypeVar('VT')
class Node(Generic[KT, VT] ):
    def __init__( self , key : KT | str = "root" , value : VT | None = None ):
        self.key = key
        self.value = value
        self.forward : list[Node[KT, VT]] = []
def __repr__( self : List[str] ):
return f"""Node({self.key}: {self.value})"""
@property
    def level( self ):
return len(self.forward )
class SkipList(Generic[KT, VT] ):
    def __init__( self , p : float = 0.5 , max_level : int = 16 ):
        self.head : Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level
    def __str__( self ):
        items = list(self )
        if len(items ) == 0:
            return f"""SkipList(level={self.level})"""
        label_size = max((len(str(item ) ) for item in items) , default=4 )
        label_size = max(label_size , 4 ) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(f"""[{node.key}]""".ljust(label_size , "-" ) + "* " * len(forwards ) )
        lines.append(" " * label_size + "| " * len(forwards ) )
        while len(node.forward ) != 0:
            node = node.forward[0]
            lines.append(
                f"""[{node.key}]""".ljust(label_size , "-" )
                + " ".join(str(n.key ) if n.key == node.key else "|" for n in forwards ) )
            lines.append(" " * label_size + "| " * len(forwards ) )
            forwards = node.forward
        lines.append("None".ljust(label_size ) + "* " * len(forwards ) )
        return f"""SkipList(level={self.level})\n""" + "\n".join(lines )
def __iter__( self : Any ):
        node = self.head
        while len(node.forward ) != 0:
            yield node.forward[0].key
            node = node.forward[0]
    def random_level( self ):
        level = 1
while random() < self.p and level < self.max_level:
level += 1
return level
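    # random_level draws from a capped geometric distribution: P(level = k) is
    # p ** (k - 1) * (1 - p) for k < max_level, so with p = 0.5 the expected node
    # height is about 2 and search paths stay O(log n) in expectation.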
    def _locate_node( self , key ):
        update_vector = []
        node = self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
            update_vector.append(node )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
    def delete( self , key : KT ):
        node, update_vector = self._locate_node(key )
        if node is not None:
            for i, update_node in enumerate(update_vector ):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]
    def insert( self , key : KT , value : VT ):
        node, update_vector = self._locate_node(key )
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1 , level ):
                    update_vector.append(self.head )
                self.level = level
            new_node = Node(key , value )
            for i, update_node in enumerate(update_vector[:level] ):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i] )
                if update_node.level < i + 1:
                    update_node.forward.append(new_node )
                else:
                    update_node.forward[i] = new_node
    def find( self , key : KT ):
        node, _ = self._locate_node(key )
        if node is not None:
            return node.value
return None
def test_insert() -> None:
    skip_list = SkipList()
    skip_list.insert("Key1" , 3 )
    skip_list.insert("Key2" , 12 )
    skip_list.insert("Key3" , 41 )
    skip_list.insert("Key4" , -19 )
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    assert len(all_values ) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19
def test_insert_overrides_existing_value() -> None:
    skip_list = SkipList()
    skip_list.insert("Key1" , 10 )
    skip_list.insert("Key1" , 12 )
    skip_list.insert("Key5" , 7 )
    skip_list.insert("Key7" , 10 )
    skip_list.insert("Key10" , 5 )
    skip_list.insert("Key7" , 7 )
    skip_list.insert("Key5" , 5 )
    skip_list.insert("Key10" , 10 )
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    if len(all_values ) != 4:
        print()
    assert len(all_values ) == 4
assert all_values["Key1"] == 1_2
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 1_0
def test_searching_empty_list_returns_none() -> None:
    skip_list = SkipList()
    assert skip_list.find("Some key" ) is None
def test_search() -> None:
    skip_list = SkipList()
skip_list.insert("Key2" , 2_0 )
assert skip_list.find("Key2" ) == 2_0
skip_list.insert("Some Key" , 1_0 )
skip_list.insert("Key2" , 8 )
skip_list.insert("V" , 1_3 )
assert skip_list.find("Y" ) is None
assert skip_list.find("Key2" ) == 8
assert skip_list.find("Some Key" ) == 1_0
assert skip_list.find("V" ) == 1_3
def test_deleting_item_from_empty_list_do_nothing() -> None:
    skip_list = SkipList()
skip_list.delete("Some key" )
assert len(skip_list.head.forward ) == 0
def test_deleted_items_are_not_founded_by_find_method() -> None:
    skip_list = SkipList()
skip_list.insert("Key1" , 1_2 )
skip_list.insert("V" , 1_3 )
skip_list.insert("X" , 1_4 )
skip_list.insert("Key2" , 1_5 )
skip_list.delete("V" )
skip_list.delete("Key2" )
assert skip_list.find("V" ) is None
assert skip_list.find("Key2" ) is None
def test_delete_removes_only_given_key() -> None:
    skip_list = SkipList()
skip_list.insert("Key1" , 1_2 )
skip_list.insert("V" , 1_3 )
skip_list.insert("X" , 1_4 )
skip_list.insert("Key2" , 1_5 )
skip_list.delete("V" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) == 1_4
assert skip_list.find("Key1" ) == 1_2
assert skip_list.find("Key2" ) == 1_5
skip_list.delete("X" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) is None
assert skip_list.find("Key1" ) == 1_2
assert skip_list.find("Key2" ) == 1_5
skip_list.delete("Key1" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) is None
assert skip_list.find("Key1" ) is None
assert skip_list.find("Key2" ) == 1_5
skip_list.delete("Key2" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) is None
assert skip_list.find("Key1" ) is None
assert skip_list.find("Key2" ) is None
def test_delete_doesnt_leave_dead_nodes() -> None:
    skip_list = SkipList()
skip_list.insert("Key1" , 1_2 )
skip_list.insert("V" , 1_3 )
skip_list.insert("X" , 1_4_2 )
skip_list.insert("Key2" , 1_5 )
skip_list.delete("X" )
    def traverse_keys(node ):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def test_iter_always_yields_sorted_values() -> None:
    def is_sorted(lst ):
        return all(next_item >= item for item, next_item in zip(lst , lst[1:] ) )

    skip_list = SkipList()
    for i in range(10 ):
        skip_list.insert(i , i )
    assert is_sorted(list(skip_list ) )
    skip_list.delete(5 )
    skip_list.delete(8 )
    skip_list.delete(2 )
    assert is_sorted(list(skip_list ) )
    skip_list.insert(-12 , -12 )
    skip_list.insert(77 , 77 )
    assert is_sorted(list(skip_list ) )
def pytests() -> None:
"""simple docstring"""
for _ in range(1_0_0 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def main() -> None:
    """
    >>> pytests()
    """
    skip_list = SkipList()
    skip_list.insert(2 , "2" )
    skip_list.insert(4 , "4" )
    skip_list.insert(6 , "4" )
    skip_list.insert(4 , "5" )
    skip_list.insert(8 , "4" )
    skip_list.insert(9 , "4" )
    skip_list.delete(4 )
    print(skip_list )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 719
|
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
__lowerCamelCase = TypeVar('KEY')
__lowerCamelCase = TypeVar('VAL')
@dataclass(frozen=True , slots=True )
class _Item(Generic[KEY, VAL] ):
__A: KEY
__A: VAL
class _DeletedItem(_Item ):
    def __init__( self ):
        super().__init__(None , None )

    def __bool__( self ):
        return False


_deleted = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL] ):
    def __init__( self , initial_block_size : int = 8 , capacity_factor : float = 0.75 ):
        self._initial_block_size = initial_block_size
        self._buckets : list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0
    def _get_bucket_index( self , key : KEY ):
        return hash(key ) % len(self._buckets )

    def _get_next_ind( self , ind : int ):
        return (ind + 1) % len(self._buckets )

    def _try_set( self , ind : int , key : KEY , val : VAL ):
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key , val )
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key , val )
            return True
        else:
            return False

    def _is_full( self ):
        limit = len(self._buckets ) * self._capacity_factor
        return len(self ) >= int(limit )

    def _is_sparse( self ):
        if len(self._buckets ) <= self._initial_block_size:
            return False
        limit = len(self._buckets ) * self._capacity_factor / 2
        return len(self ) < limit

    def _resize( self , new_size : int ):
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key , item.val )

    def _size_up( self ):
        self._resize(len(self._buckets ) * 2 )

    def _size_down( self ):
        self._resize(len(self._buckets ) // 2 )

    def _iterate_buckets( self , key : KEY ):
        ind = self._get_bucket_index(key )
        for _ in range(len(self._buckets ) ):
            yield ind
            ind = self._get_next_ind(ind )

    def _add_item( self , key : KEY , val : VAL ):
        for ind in self._iterate_buckets(key ):
            if self._try_set(ind , key , val ):
                break

    def __setitem__( self , key : KEY , val : VAL ):
        if self._is_full():
            self._size_up()
        self._add_item(key , val )
    def __delitem__( self , key : KEY ):
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key )
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
    def __getitem__( self , key : KEY ):
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key )
def __len__( self : List[str] ):
return self._len
def __iter__( self : List[str] ):
yield from (item.key for item in self._buckets if item)
def __repr__( self : Tuple ):
_UpperCAmelCase : Optional[int] = " ,".join(
f"""{item.key}: {item.val}""" for item in self._buckets if item )
return f"""HashMap({val_string})"""
| 328
| 0
|
import numpy as np
from transformers import Pipeline
def softmax(outputs ):
    maxes = np.max(outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
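# Subtracting the per-row maximum before exponentiating is the standard
# numerical-stability trick: softmax(x) == softmax(x - c) for any constant c,
# but np.exp no longer overflows for large logits.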
class PairClassificationPipeline(Pipeline ):
    def _sanitize_parameters( self , **kwargs ):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}
    def preprocess( self , text , second_text=None ):
        return self.tokenizer(text , text_pair=second_text , return_tensors=self.framework )
    def _forward( self , model_inputs ):
        return self.model(**model_inputs )
    def postprocess( self , model_outputs ):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits )
        best_class = np.argmax(probabilities )
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 70
|
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
a :Any = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
a :str = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
]
)
def rename_key(state_dict , old , new ):
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys(state_dict ):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict ):
    prefix = ""
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''encoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:256, :]
        state_dict[f'''encoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:256]
        state_dict[f'''encoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[256:512, :]
        state_dict[f'''encoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[256:512]
        state_dict[f'''encoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-256:, :]
        state_dict[f'''encoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6 ):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''decoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:256, :]
        state_dict[f'''decoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:256]
        state_dict[f'''decoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[256:512, :]
        state_dict[f'''decoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[256:512]
        state_dict[f'''decoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-256:, :]
        state_dict[f'''decoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
        in_proj_bias_cross_attn = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f'''decoder.layers.{i}.encoder_attn.q_proj.weight'''] = in_proj_weight_cross_attn[:256, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.q_proj.bias'''] = in_proj_bias_cross_attn[:256]
        state_dict[f'''decoder.layers.{i}.encoder_attn.k_proj.weight'''] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.k_proj.bias'''] = in_proj_bias_cross_attn[256:512]
        state_dict[f'''decoder.layers.{i}.encoder_attn.v_proj.weight'''] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.v_proj.bias'''] = in_proj_bias_cross_attn[-256:]
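# Note: the slicing above follows PyTorch's fused nn.MultiheadAttention layout,
# where in_proj_weight stacks W_q, W_k and W_v along dim 0; with hidden size 256
# the rows [:256], [256:512] and [-256:] are the query, key and value projections.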
def resize(image , checkpoint_url ):
    width, height = image.size
    current_max_size = max(width , height )
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
    return resized_image


def normalize(image ):
    image = F.to_tensor(image )
    image = F.normalize(image , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url , pytorch_dump_folder_path , push_to_hub ):
logger.info("""Converting model...""" )
# load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )
# rename keys
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    state_dict = rename_backbone_keys(state_dict )
# query, key and value matrices need special treatment
    read_in_q_k_v(state_dict )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
SCREAMING_SNAKE_CASE__ : Optional[int] = """model."""
for key in state_dict.copy().keys():
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
SCREAMING_SNAKE_CASE__ : Optional[int] = state_dict.pop(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] = val
# create HuggingFace model and load state dict
    config = TableTransformerConfig(
backbone="""resnet18""" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
SCREAMING_SNAKE_CASE__ : Optional[int] = 15
SCREAMING_SNAKE_CASE__ : Any = 2
SCREAMING_SNAKE_CASE__ : str = {0: """table""", 1: """table rotated"""}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = idalabel
SCREAMING_SNAKE_CASE__ : List[str] = {v: k for k, v in idalabel.items()}
else:
SCREAMING_SNAKE_CASE__ : Tuple = 125
SCREAMING_SNAKE_CASE__ : str = 6
SCREAMING_SNAKE_CASE__ : List[Any] = {
0: """table""",
1: """table column""",
2: """table row""",
3: """table column header""",
4: """table projected row header""",
5: """table spanning cell""",
}
SCREAMING_SNAKE_CASE__ : Any = idalabel
SCREAMING_SNAKE_CASE__ : Dict = {v: k for k, v in idalabel.items()}
    image_processor = DetrImageProcessor(
        format="coco_detection" , max_size=800 if "detection" in checkpoint_url else 1000 )
    model = TableTransformerForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    # verify our conversion
    file_name = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    local_path = hf_hub_download(repo_id="nielsr/example-pdf" , repo_type="dataset" , filename=file_name )
    image = Image.open(local_path ).convert("RGB" )
    pixel_values = normalize(resize(image , checkpoint_url ) ).unsqueeze(0 )
    outputs = model(pixel_values )
if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits , atol=1E-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes , atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
# Push model to HF hub
logger.info("""Pushing model to the hub...""" )
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name )
        image_processor.push_to_hub(model_name )
if __name__ == "__main__":
a :Any = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
type=str,
choices=[
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
],
help="URL of the Table Transformer checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
a :int = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
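# Typical invocation (paths are illustrative; the script filename is an assumption):
#   python convert_table_transformer_checkpoint.py \
#     --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#     --pytorch_dump_folder_path ./table-transformer-detection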
| 680
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_poolformer""": [
"""POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""PoolFormerConfig""",
"""PoolFormerOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ["""PoolFormerFeatureExtractor"""]
__lowercase = ["""PoolFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
"""POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PoolFormerForImageClassification""",
"""PoolFormerModel""",
"""PoolFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
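# Pattern note: _LazyModule swaps itself in as the module object, so the heavy
# torch/vision imports only run when an attribute is first accessed; e.g.
# `from transformers.models.poolformer import PoolFormerModel` triggers the
# modeling import at that point, keeping the top-level `import transformers` fast.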
| 706
|
def lucas_lehmer_test(p: int ) -> bool:
    if p < 2:
        raise ValueError("p should not be less than 2!" )
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0
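# Lucas-Lehmer: for an odd prime p, the Mersenne number M_p = 2**p - 1 is prime
# iff s_(p-2) % M_p == 0, where s_0 = 4 and s_k = s_(k-1)**2 - 2. The checks
# below exercise M_7 = 127 (prime, prints True) and M_11 = 2047 = 23 * 89 (False).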
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 563
| 0
|
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger()
def convert_weight_and_push(hidden_sizes ,name ,config ,save_directory ,push_to_hub = True ):
    print(f"""Converting {name}...""" )
    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s" ,pretrained=True )
            else:
                from_model = timm.create_model("levit_128" ,pretrained=True )
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192" ,pretrained=True )
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256" ,pretrained=True )
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384" ,pretrained=True )

    from_model.eval()
    our_model = LevitForImageClassificationWithTeacher(config ).eval()
    huggingface_weights = OrderedDict()
    weights = from_model.state_dict()
    og_keys = list(from_model.state_dict().keys() )
    new_keys = list(our_model.state_dict().keys() )
    print(len(og_keys ) ,len(new_keys ) )
    for i in range(len(og_keys ) ):
        huggingface_weights[new_keys[i]] = weights[og_keys[i]]
    our_model.load_state_dict(huggingface_weights )
    x = torch.randn((2, 3, 224, 224) )
    out1 = from_model(x )
    out2 = our_model(x ).logits
    assert torch.allclose(out1 ,out2 ), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name )
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name )
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name )
        print(f"""Pushed {checkpoint_name}""" )
def convert_weights_and_push(save_directory ,model_name = None ,push_to_hub = True ):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id ,filename ,repo_type="dataset" ) ,"r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig ,num_labels=num_labels ,id2label=id2label ,label2id=label2id )
    names_to_hidden_sizes = {
"""levit-128S""": 1_2_8,
"""levit-128""": 1_2_8,
"""levit-192""": 1_9_2,
"""levit-256""": 2_5_6,
"""levit-384""": 3_8_4,
}
    names_to_config = {
"""levit-128S""": ImageNetPreTrainedConfig(
hidden_sizes=[1_2_8, 2_5_6, 3_8_4] ,num_attention_heads=[4, 6, 8] ,depths=[2, 3, 4] ,key_dim=[1_6, 1_6, 1_6] ,drop_path_rate=0 ,),
"""levit-128""": ImageNetPreTrainedConfig(
hidden_sizes=[1_2_8, 2_5_6, 3_8_4] ,num_attention_heads=[4, 8, 1_2] ,depths=[4, 4, 4] ,key_dim=[1_6, 1_6, 1_6] ,drop_path_rate=0 ,),
"""levit-192""": ImageNetPreTrainedConfig(
hidden_sizes=[1_9_2, 2_8_8, 3_8_4] ,num_attention_heads=[3, 5, 6] ,depths=[4, 4, 4] ,key_dim=[3_2, 3_2, 3_2] ,drop_path_rate=0 ,),
"""levit-256""": ImageNetPreTrainedConfig(
hidden_sizes=[2_5_6, 3_8_4, 5_1_2] ,num_attention_heads=[4, 6, 8] ,depths=[4, 4, 4] ,key_dim=[3_2, 3_2, 3_2] ,drop_path_rate=0 ,),
"""levit-384""": ImageNetPreTrainedConfig(
hidden_sizes=[3_8_4, 5_1_2, 7_6_8] ,num_attention_heads=[6, 9, 1_2] ,depths=[4, 4, 4] ,key_dim=[3_2, 3_2, 3_2] ,drop_path_rate=0.1 ,),
}
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name] ,model_name ,names_to_config[model_name] ,save_directory ,push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name] ,model_name ,config ,save_directory ,push_to_hub )
return config, expected_shape
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""levit-dump-folder/""",
type=Path,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
_lowerCAmelCase = parser.parse_args()
_lowerCAmelCase = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 569
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
"""simple docstring"""
def __init__( self : Union[str, Any] ):
'''simple docstring'''
        self.events = []
    def on_init_end( self , args , state , control , **kwargs ):
        self.events.append('on_init_end' )

    def on_train_begin( self , args , state , control , **kwargs ):
        self.events.append('on_train_begin' )

    def on_train_end( self , args , state , control , **kwargs ):
        self.events.append('on_train_end' )

    def on_epoch_begin( self , args , state , control , **kwargs ):
        self.events.append('on_epoch_begin' )

    def on_epoch_end( self , args , state , control , **kwargs ):
        self.events.append('on_epoch_end' )

    def on_step_begin( self , args , state , control , **kwargs ):
        self.events.append('on_step_begin' )

    def on_step_end( self , args , state , control , **kwargs ):
        self.events.append('on_step_end' )

    def on_evaluate( self , args , state , control , **kwargs ):
        self.events.append('on_evaluate' )

    def on_predict( self , args , state , control , **kwargs ):
        self.events.append('on_predict' )

    def on_save( self , args , state , control , **kwargs ):
        self.events.append('on_save' )

    def on_log( self , args , state , control , **kwargs ):
        self.events.append('on_log' )

    def on_prediction_step( self , args , state , control , **kwargs ):
        self.events.append('on_prediction_step' )
@require_torch
class UpperCamelCase__ ( unittest.TestCase):
"""simple docstring"""
    def setUp( self ):
        self.output_dir = tempfile.mkdtemp()

    def tearDown( self ):
        shutil.rmtree(self.output_dir )
    def get_trainer( self , a=0 , b=0 , train_len=64 , eval_len=64 , callbacks=None , disable_tqdm=False , **kwargs ):
        train_dataset = RegressionDataset(length=train_len )
        eval_dataset = RegressionDataset(length=eval_len )
        config = RegressionModelConfig(a=a , b=b )
        model = RegressionPreTrainedModel(config )
        args = TrainingArguments(self.output_dir , disable_tqdm=disable_tqdm , report_to=[] , **kwargs )
        return Trainer(
            model , args , train_dataset=train_dataset , eval_dataset=eval_dataset , callbacks=callbacks , )
    def check_callbacks_equality( self , cbs1 , cbs2 ):
        self.assertEqual(len(cbs1 ) , len(cbs2 ) )
        # Order doesn't matter
        cbs1 = sorted(cbs1 , key=lambda cb: cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )
        cbs2 = sorted(cbs2 , key=lambda cb: cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )
        for cb1, cb2 in zip(cbs1 , cbs2 ):
            if isinstance(cb1 , type ) and isinstance(cb2 , type ):
                self.assertEqual(cb1 , cb2 )
            elif isinstance(cb1 , type ) and not isinstance(cb2 , type ):
                self.assertEqual(cb1 , cb2.__class__ )
            elif not isinstance(cb1 , type ) and isinstance(cb2 , type ):
                self.assertEqual(cb1.__class__ , cb2 )
            else:
                self.assertEqual(cb1 , cb2 )
    def get_expected_events( self , trainer ):
        expected_events = ['on_init_end', 'on_train_begin']
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader() )
        evaluation_events = ['on_prediction_step'] * len(trainer.get_eval_dataloader() ) + ['on_log', 'on_evaluate']
        for _ in range(trainer.state.num_train_epochs ):
            expected_events.append('on_epoch_begin' )
            for _ in range(train_dl_len ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('on_log' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('on_save' )
expected_events.append('on_epoch_end' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
    def test_init_callback( self ):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] )
        expected_callbacks.append(MyTestTrainerCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True )
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
    def test_add_remove_callback( self ):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()
        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback )
        expected_callbacks.remove(DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback )
        self.assertEqual(cb.__class__ , DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer.add_callback(DefaultFlowCallback )
        expected_callbacks.insert(0 , DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb )
        expected_callbacks.remove(cb )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1 )
        self.assertEqual(cb1 , cb2 )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer.add_callback(cb1 )
        expected_callbacks.insert(0 , cb1 )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
    def test_event_flow( self ):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action='ignore' , category=UserWarning )
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='steps' )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='epoch' )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy='steps' , )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        # warning should be emitted for duplicated callbacks
        with patch('transformers.trainer_callback.logger.warning' ) as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
            assert str(MyTestTrainerCallback ) in warn_mock.call_args[0][0]
| 545
| 0
|
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
'''simple docstring'''
    def __init__( self , parent , ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_distilbert_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDistilBertModel(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_distilbert_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDistilBertForMaskedLM(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_distilbert_for_question_answering( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDistilBertForQuestionAnswering(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_distilbert_for_sequence_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_distilbert_for_multiple_choice( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_distilbert_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE ( a__ , a__ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
UpperCamelCase_ : Any = (
{
"""feature-extraction""": TFDistilBertModel,
"""fill-mask""": TFDistilBertForMaskedLM,
"""question-answering""": TFDistilBertForQuestionAnswering,
"""text-classification""": TFDistilBertForSequenceClassification,
"""token-classification""": TFDistilBertForTokenClassification,
"""zero-shot""": TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase_ : List[str] = False
UpperCamelCase_ : Optional[int] = False
    def setUp( self ):
        self.model_tester = TFDistilBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DistilBertConfig , dim=37 )
def _A ( self : List[str] ):
self.config_tester.run_common_tests()
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*lowerCAmelCase__ )
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*lowerCAmelCase__ )
def _A ( self : Tuple ):
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*lowerCAmelCase__ )
def _A ( self : List[Any] ):
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowerCAmelCase__ )
def _A ( self : Any ):
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowerCAmelCase__ )
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*lowerCAmelCase__ )
@slow
def _A ( self : str ):
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
SCREAMING_SNAKE_CASE : Tuple = TFDistilBertModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
def _A ( self : Any ):
SCREAMING_SNAKE_CASE : List[Any] = TFDistilBertModel.from_pretrained("distilbert-base-uncased" )
SCREAMING_SNAKE_CASE : Dict = tf.constant([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE : int = model(lowerCAmelCase__ )[0]
SCREAMING_SNAKE_CASE : int = [1, 6, 768]
self.assertEqual(output.shape , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = tf.constant(
[
[
[0.19_261_885, -0.13_732_955, 0.4_119_799],
[0.22_150_156, -0.07_422_661, 0.39_037_204],
[0.22_756_018, -0.0_896_414, 0.3_701_467],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowerCAmelCase__ , atol=1E-4 )
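# Usage sketch mirroring the integration test above, runnable from a transformers
# install with TensorFlow; the input sentence is an arbitrary illustrative choice:
#
#     from transformers import AutoTokenizer, TFDistilBertModel
#
#     tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
#     model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
#     encoded = tokenizer("Hello, DistilBERT!", return_tensors="tf")
#     print(model(encoded).last_hidden_state.shape)  # (1, sequence_length, 768)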
| 709
|
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
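# Usage sketch for the pipelines exported above. The checkpoint names
# ("lllyasviel/sd-controlnet-canny", "runwayml/stable-diffusion-v1-5") and the
# `canny_edge_map` conditioning image are illustrative assumptions, not pinned
# by this module:
#
#     from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
#
#     controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
#     pipe = StableDiffusionControlNetPipeline.from_pretrained(
#         "runwayml/stable-diffusion-v1-5", controlnet=controlnet
#     )
#     image = pipe("a photo of a cat", image=canny_edge_map).images[0]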
| 488
| 0
|
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """Output of `PriorTransformer`: the predicted CLIP image embedding."""

    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    """A prior transformer that predicts CLIP image embeddings from text embeddings."""

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings: int = 77,
        additional_embeddings: int = 4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)

        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        # Causal mask over the full token sequence (text states, proj embedding,
        # time embedding, hidden states, optional prd token).
        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        r"""Returns all attention processors used in the model, indexed by their weight name."""
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        r"""Sets the attention processor to use: either one processor for all layers or a dict per layer."""
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        """Disables custom attention processors and restores the default attention implementation."""
        self.set_attn_processor(AttnProcessor())

    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(
            additional_embeds,
            dim=1,
        )

        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
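# Usage sketch for the class above, written against a diffusers install (this module
# itself uses relative imports, so it cannot be run as a script); the tiny model sizes
# and tensor shapes below are illustrative assumptions:
#
#     import torch
#     from diffusers.models.prior_transformer import PriorTransformer
#
#     prior = PriorTransformer(num_attention_heads=2, attention_head_dim=8, num_layers=2, embedding_dim=16)
#     out = prior(
#         hidden_states=torch.randn(2, 16),
#         timestep=1,
#         proj_embedding=torch.randn(2, 16),
#         encoder_hidden_states=torch.randn(2, 77, 16),
#     )
#     assert out.predicted_image_embedding.shape == (2, 16)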
| 382
|
"""simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : Dict = "efficientnet"
def __init__( self : Optional[Any] ,_snake_case : int = 3 ,_snake_case : int = 600 ,_snake_case : float = 2.0 ,_snake_case : float = 3.1 ,_snake_case : int = 8 ,_snake_case : List[int] = [3, 3, 5, 3, 5, 5, 3] ,_snake_case : List[int] = [32, 16, 24, 40, 80, 112, 192] ,_snake_case : List[int] = [16, 24, 40, 80, 112, 192, 320] ,_snake_case : List[int] = [] ,_snake_case : List[int] = [1, 2, 2, 2, 1, 2, 1] ,_snake_case : List[int] = [1, 2, 2, 3, 3, 4, 1] ,_snake_case : List[int] = [1, 6, 6, 6, 6, 6, 6] ,_snake_case : float = 0.25 ,_snake_case : str = "swish" ,_snake_case : int = 2_560 ,_snake_case : str = "mean" ,_snake_case : float = 0.02 ,_snake_case : float = 0.001 ,_snake_case : float = 0.99 ,_snake_case : float = 0.5 ,_snake_case : float = 0.2 ,**_snake_case : List[str] ,) -> Optional[Any]:
"""simple docstring"""
super().__init__(**_snake_case )
lowercase__ : Optional[Any] = num_channels
lowercase__ : Any = image_size
lowercase__ : Optional[Any] = width_coefficient
lowercase__ : Dict = depth_coefficient
lowercase__ : Optional[int] = depth_divisor
lowercase__ : Optional[int] = kernel_sizes
lowercase__ : str = in_channels
lowercase__ : Any = out_channels
lowercase__ : Union[str, Any] = depthwise_padding
lowercase__ : str = strides
lowercase__ : List[str] = num_block_repeats
lowercase__ : List[str] = expand_ratios
lowercase__ : List[str] = squeeze_expansion_ratio
lowercase__ : Optional[Any] = hidden_act
lowercase__ : Any = hidden_dim
lowercase__ : Optional[int] = pooling_type
lowercase__ : List[str] = initializer_range
lowercase__ : List[Any] = batch_norm_eps
lowercase__ : List[Any] = batch_norm_momentum
lowercase__ : Tuple = dropout_rate
lowercase__ : Tuple = drop_connect_rate
lowercase__ : Union[str, Any] = sum(_snake_case ) * 4
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : List[str] = version.parse("1.11" )
@property
def UpperCAmelCase ( self : str ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def UpperCAmelCase ( self : Optional[int] ) -> float:
"""simple docstring"""
return 1e-5
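# Usage sketch for the configuration above, written against a transformers install
# (this module uses relative imports); the b0-style overrides are illustrative:
#
#     from transformers import EfficientNetConfig, EfficientNetModel
#
#     config = EfficientNetConfig(image_size=224, width_coefficient=1.0, depth_coefficient=1.0)
#     model = EfficientNetModel(config)  # randomly initialized, not pretrained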
| 560
| 0
|
import unittest

import torch
from torch import nn

from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory


def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
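# Usage sketch for `find_executable_batch_size` outside the test harness: the decorator
# passes `batch_size` as the first argument and halves it after every out-of-memory
# failure. The training internals below are illustrative assumptions.
if __name__ == "__main__":

    @find_executable_batch_size(starting_batch_size=32)
    def demo_training_function(batch_size):
        print(f"trying batch_size={batch_size}")
        if batch_size > 8:  # stand-in for a real CUDA out-of-memory failure
            raise RuntimeError("CUDA out of memory.")
        return batch_size

    print("settled on batch size:", demo_training_function())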
| 720
|
import collections
import inspect
import unittest

from transformers import Swinv2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import Swinv2ForImageClassification, Swinv2ForMaskedImageModeling, Swinv2Model
    from transformers.models.swinv2.modeling_swinv2 import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class Swinv2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return Swinv2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = Swinv2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = Swinv2ForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = Swinv2ForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = Swinv2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class Swinv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (Swinv2Model, Swinv2ForImageClassification, Swinv2ForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": Swinv2Model, "image-classification": Swinv2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = Swinv2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Swinv2Config, embed_dim=37)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason="Swinv2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), expected_num_attentions)

            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = Swinv2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )


@require_vision
@require_torch
class Swinv2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
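# Inference sketch mirroring the integration test above, written against a transformers
# install (this test module uses relative imports); the image path is an assumption:
#
#     from PIL import Image
#     from transformers import AutoImageProcessor, Swinv2ForImageClassification
#
#     processor = AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
#     model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
#     inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#     logits = model(**inputs).logits
#     print(model.config.id2label[logits.argmax(-1).item()])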
| 319
| 0
|