import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
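
# Note: `generate_example_dataset` and `get_duration` come from a local `utils`
# module that is not included here. As a rough sketch (an assumption, not the
# actual upstream code), `get_duration` can be thought of as a decorator that
# returns the wall-clock time of the wrapped call:
#
# import functools, timeit
#
# def get_duration(func):
#     @functools.wraps(func)
#     def wrapper(*args, **kwargs):
#         start = timeit.default_timer()
#         func(*args, **kwargs)
#         return timeit.default_timer() - start
#     return wrapper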
"""Evolve a random population of strings toward a target string with a simple genetic algorithm."""

from __future__ import annotations

import random

# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_000))


def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item: the number of positions where it matches the target."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails to create two children."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """With probability MUTATION_PROBABILITY, replace one random gene of the child."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Breed a parent against random members of the population, proportionally to its score."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until a member of the population matches the target exactly."""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside the genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population: {total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is the selection.
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
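
    # Illustrative usage (hypothetical target and gene list, not from the
    # original script): a short target over a small alphabet converges in
    # very few generations, which makes for a quick sanity check.
    #
    # generation, population, target = basic("Hello World!", list(" !HWdelor"), debug=False)
    # assert target == "Hello World!"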
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
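
# Migration sketch (illustrative): because this subclass only emits a
# deprecation warning and forwards all arguments, swapping it out is a
# one-line change.
#
# from transformers import Trainer
# trainer = Trainer(model=model, args=training_args)  # instead of SageMakerTrainer(args=training_args, model=model)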
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1_000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
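
# To run just the fast tests above with pytest (illustrative; the exact path
# depends on where this file lives in the diffusers checkout):
#
#   pytest tests/pipelines/kandinsky2_2/test_kandinsky_img2img.py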
"""
Create and train a Long Short-Term Memory (LSTM) network that predicts the
next `forward_days` prices from a sliding `look_back` window of past prices.
"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
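
    # Note: `pred` is in the scaled [0, 1] space because the prices were
    # MinMax-scaled above. To report real prices, keep a reference to the
    # fitted scaler and invert the transform (sketch, not part of the
    # original script):
    #
    # scaler = MinMaxScaler()
    # actual_data = scaler.fit_transform(actual_data)
    # ...
    # predicted_prices = scaler.inverse_transform(pred)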
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    """Creates train and validation `DataLoader`s for the GLUE MRPC dataset."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw an error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    # The original checkpoints store the LM head under a different key.
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
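
# Illustrative follow-up (assumes DialoGPT's standard GPT-2 architecture and a
# matching config.json in the dump folder): the converted weights load with
# the regular GPT-2 classes.
#
# from transformers import GPT2LMHeadModel
# model = GPT2LMHeadModel.from_pretrained("./DialoGPT-small")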
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if they are not marked as "integration" (or already as "unit").
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
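
# With the hooks above, test files need no extra setup to get an isolated
# cache, and anything not explicitly marked is treated as a unit test, e.g.
# (illustrative):
#
# @pytest.mark.integration
# def test_load_dataset_from_hub():
#     ...  # talks to the real Hub; unmarked tests get pytest.mark.unit automatically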
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer


def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch


logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
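
# Quick illustration of the QA metrics above (hypothetical strings; the values
# follow directly from the definitions):
#
# >>> normalize_answer("The Eiffel Tower!")
# 'eiffel tower'
# >>> round(f1_score("the Eiffel Tower", "Eiffel Tower, Paris"), 2)  # precision 1.0, recall 2/3
# 0.8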
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
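
# Typical invocations (the `accelerate` console entry point wires this parser
# in; `--config_file` is the flag defined above):
#
#   accelerate config
#   accelerate config --config_file ./my_config.yaml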
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
    BitConfig,
    ViTHybridConfig,
    ViTHybridForImageClassification,
    ViTHybridImageProcessor,
    ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, base_model=False):
    rename_keys = []

    # fmt: off
    # stem:
    rename_keys.append(("cls_token", "vit.embeddings.cls_token"))
    rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings"))

    rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"))

    # backbone
    rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight"))
    rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight"))
    rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias"))

    for stage_idx in range(len(config.backbone_config.depths)):
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight"))
            rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias"))
        rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight"))
        rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight"))
        rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias"))

    # transformer encoder
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )
    # fmt: on

    return rename_keys


# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the model's weights to our ViT hybrid structure.
    """
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_r50_s16_384",
        type=str,
        help="Name of the hybrid ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )

    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
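
# Example invocation (assuming this script is saved under its usual name in
# the transformers repo; the flags are the ones defined in the parser above):
#
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base \
#       --push_to_hub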
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]

# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
    "A": "N",
    "N": "A",
    "B": "O",
    "O": "B",
    "C": "P",
    "P": "C",
    "D": "Q",
    "Q": "D",
    "E": "R",
    "R": "E",
    "F": "S",
    "S": "F",
    "G": "T",
    "T": "G",
    "H": "U",
    "U": "H",
    "I": "V",
    "V": "I",
    "J": "W",
    "W": "J",
    "K": "X",
    "X": "K",
    "L": "Y",
    "Y": "L",
    "M": "Z",
    "Z": "M",
}

# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"


def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    """Checks that rotor selection, rotor positions and plugboard settings are valid."""
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict


def _plugboard(pbstring: str) -> dict[str, str]:
    """Builds the symmetric plugboard mapping from a string of letter pairs."""
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}

    pbstring = pbstring.replace(" ", "")

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Create the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb


def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    """Encrypt (or, with identical settings, decrypt) a message."""
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper()
    )

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # else:
        #     pass
        #     Error could be also raised:
        #     raise ValueError(
        #         'Invalid symbol('+repr(symbol)+')')
        result.append(symbol)

    return "".join(result)


if __name__ == "__main__":
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)
    pb = "pictures"
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)

    print("Encrypted message:", en)
    print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
| 94
|
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 94
| 1
|
from importlib import import_module

from .logging import get_logger


logger = get_logger(__name__)


class _PatchedModuleObj:
    """Set all the module's components as attributes of the _PatchedModuleObj object."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other submodules intact at all levels."""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None

        return self.__exit__()
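
# Minimal usage sketch (illustrative; `my_module` is a placeholder module that
# does `import os` at its top level):
#
#   with patch_submodule(my_module, "os.path.join", lambda *a: "patched"):
#       my_module.os.path.join("a", "b")  # -> "patched"
#
# Other attributes of "os" and "os.path" stay intact inside the block.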
| 355
|
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data):
        self.data = data
        self.right = None
        self.left = None
def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise RuntimeError("unreachable: each iteration either returns or enqueues nodes")
def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")
def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    """Level order traversal that prints one line per tree level."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():  # consume the current level
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)
def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right
def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stacka, stackb = [], []
    n = node
    stacka.append(n)
    while stacka:  # to find the reversed order of post order, store it in stackb
        n = stacka.pop()
        if n.left:
            stacka.append(n.left)
        if n.right:
            stacka.append(n.right)
        stackb.append(n)
    while stackb:  # popping from stackb yields the post order
        print(stackb.pop().data, end=",")
def prompt(s: str = "", width: int = 50, char: str = "*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"
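
# Illustrative: prompt("Hi", 10) returns "*** Hi ***" since
# divmod(10 - len("Hi") - 2, 2) == (3, 0) splits the padding evenly.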
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
    node: TreeNode = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 5_0 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
| 99
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)
        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip("""Protein models do not support embedding resizing.""" )
def A__ ( self ):
"""simple docstring"""
pass
@unittest.skip("""Protein models do not support embedding resizing.""" )
def A__ ( self ):
"""simple docstring"""
pass
def A__ ( self ):
"""simple docstring"""
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(__lowerCAmelCase )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
lowercase = model.get_bias()
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
for k, v in name.items():
assert isinstance(__lowerCAmelCase , tf.Variable )
else:
lowercase = model.get_output_embeddings()
assert x is None
lowercase = model.get_bias()
assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))
    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 197
|
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
) -> None:
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False) -> None:
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_path""",
type=str,
required=True,
help="""Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).""",
)
parser.add_argument("""--output_path""", type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--opset""",
default=1_4,
type=int,
help="""The version of the ONNX operator set to use.""",
)
parser.add_argument("""--fp16""", action="""store_true""", default=False, help="""Export the models in `float16` mode""")
    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
| 197
| 1
|
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
["""en-ru""", 26.0],
["""ru-en""", 22.0],
["""en-de""", 22.0],
["""de-en""", 29.0],
] )
@slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)
        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]
        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
| 359
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 1
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/bart-base': 1024,
'facebook/bart-large': 1024,
'facebook/bart-large-mnli': 1024,
'facebook/bart-large-cnn': 1024,
'facebook/bart-large-xsum': 1024,
'yjernite/bart_eli5': 1024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    """Fast (Rust-backed) BART tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 43
|
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--repo_path',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()

    config_parameters_to_change = {
'image_size': 'sample_size',
'num_res_blocks': 'layers_per_block',
'block_channels': 'block_out_channels',
'down_blocks': 'down_block_types',
'up_blocks': 'up_block_types',
'downscale_freq_shift': 'freq_shift',
'resnet_num_groups': 'norm_num_groups',
'resnet_act_fn': 'act_fn',
'resnet_eps': 'norm_eps',
'num_head_channels': 'attention_head_dim',
}
    key_parameters_to_change = {
'time_steps': 'time_proj',
'mid': 'mid_block',
'downsample_blocks': 'down_blocks',
'upsample_blocks': 'up_blocks',
}
    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 15
| 0
|
def hex_to_bin(hex_num: str) -> int:
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")
    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")
    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1
    return int(("-" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 81
|
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """Wraps the CLIP processing steps with differentiable torchvision transforms
    so gradients can flow through image preprocessing."""

    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14") -> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ) -> None:
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)"
            )
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        frame_duration_array = [frame_duration] * len(paths)
        if extend_frames:
            frame_duration_array[0] = 1.5
            frame_duration_array[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=frame_duration_array)
        print(f"gif saved to {output_path}")
    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        x = preprocess(Image.open(path), target_image_size=256).to(self.device)
        x_processed = preprocess_vqgan(x)
        z, *_ = self.vqgan.encode(x_processed)
        return z

    def _add_vector(self, transform_vector):
        """Add a vector to the latent and decode it back to an image."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)
    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss
    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)

        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector
    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log("Original Image", wandb.Image(image))
    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }
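
    # Illustrative: process_prompts("sunset:1.5|fog:-1") returns
    # {"prompts": ["sunset", "fog"], "weights": tensor([1.5, -1.0])}.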
    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        """Initialize the latent from an image (or randomly) and optimize it with CLIP guidance."""
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
| 81
| 1
|
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'''--original_config_file''',
default=None,
type=str,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--scheduler_type''',
default='''pndm''',
type=str,
help='''Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']''',
)
parser.add_argument(
'''--pipeline_type''',
default=None,
type=str,
help=(
'''The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''''
'''. If `None` pipeline will be automatically inferred.'''
),
)
parser.add_argument(
'''--image_size''',
default=None,
type=int,
help=(
'''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--prediction_type''',
default=None,
type=str,
help=(
'''The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'''
''' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
parser.add_argument(
'''--stable_unclip''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.''',
)
parser.add_argument(
'''--stable_unclip_prior''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.''',
)
parser.add_argument(
'''--clip_stats_path''',
type=str,
help='''Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.''',
required=False,
)
parser.add_argument(
'''--controlnet''', action='''store_true''', default=None, help='''Set flag if this is a controlnet checkpoint.'''
)
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--vae_path''',
type=str,
default=None,
required=False,
help='''Set to a path, hub id to an already converted vae to not convert it again.''',
)
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 189
|
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)
    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__
    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__
    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x
def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)
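
# Illustrative check: differentiate(lambda x: x**2, 2.0, 1) evaluates the first
# derivative of x**2 at x=2, i.e. 4.0.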
if __name__ == "__main__":
import doctest
doctest.testmod()
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> List[Any]:
return y**2 * y**4
print(differentiate(f, 9, 2))
| 189
| 1
|
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
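
# Illustrative: parse_unknown_args(["--foo", "1", "--bar", "x"]) -> {"foo": "1", "bar": "x"}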
def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)
    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)
    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main()
| 357
|
"""simple docstring"""
def solution() -> int:
    """Project Euler 40: digits of Champernowne's constant."""
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
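
# Known Project Euler 40 result: the product of those seven digits is 210.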
if __name__ == "__main__":
print(solution())
| 316
| 0
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1054)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1054)
    def test_full_tokenizer(self):
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
SCREAMING_SNAKE_CASE__ : Tuple = {"""input_ids""": [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE__ , model_name="""facebook/mbart-large-50""" , revision="""d3913889c59cd5c9e456b269c376325eabad57e2""" , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBart50OneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBart50Tokenizer = MBart50Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 250038)

    def test_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250053, 250001])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBart50Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]

    @require_torch
    def test_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    def test_seq2seq_max_target_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[250004, 62, 3034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
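# Minimal usage sketch (illustrative, not part of the test suite): how the
# tokenizer under test is typically driven for en->ro translation. Checkpoint
# name and language codes are the ones exercised above.
#
#   from transformers import MBart50Tokenizer
#
#   tok = MBart50Tokenizer.from_pretrained(
#       "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   batch = tok(["UN Chief Says There Is No Military Solution in Syria"], return_tensors="pt")
#   # batch.input_ids starts with the en_XX language code (250004) and ends with </s> (2)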
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), expected_num_attentions)
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
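# Minimal end-to-end sketch (illustrative; mirrors the integration tests above):
#
#   from transformers import SegformerImageProcessor, SegformerForSemanticSegmentation
#
#   processor = SegformerImageProcessor()
#   model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
#   inputs = processor(images=prepare_img(), return_tensors="pt")
#   outputs = model(**inputs)          # outputs.logits has shape (1, num_labels, h/4, w/4)
#   seg_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[(500, 300)])[0]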
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()

    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
# optional parameters
parser.add_argument(
'''--targets''',
default='''DeprecationWarning,UserWarning,FutureWarning''',
type=list_str,
help='''Comma-separated list of target warning(s) which we want to extract.''',
)
parser.add_argument(
'''--from_gh''',
action='''store_true''',
help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
)
    args = parser.parse_args()

    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('''=''' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
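# Example invocation (illustrative; the script path is assumed, the flags are
# defined above):
#
#   python utils/extract_warnings.py \
#       --workflow_run_id 12345 \
#       --output_dir ./artifacts \
#       --token $GITHUB_TOKEN \
#       --targets DeprecationWarning,UserWarning
#
# The selected warnings end up in `<output_dir>/selected_warnings.json`.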
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges; adding the weight is optional, handles repetition
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
    # if no destination is meant, the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the number of nodes wanted; a random count is chosen if not specified
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)
    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])
    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)
    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class Graph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges; adding the weight is optional, handles repetition
    def add_pair(self, u, v, w=1):
        # check if the u exists
        if self.graph.get(u):
            # if there already is an edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is an edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)

    # if no destination is meant, the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited
    # c is the number of nodes wanted; a random count is chosen if not specified
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])
    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
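

if __name__ == "__main__":
    # Small illustrative demo (not part of the original module): build a tiny
    # directed graph and exercise the traversals defined above.
    g = DirectedGraph()
    g.add_pair(0, 1)
    g.add_pair(1, 2)
    g.add_pair(2, 0)  # introduces the cycle 0 -> 1 -> 2 -> 0
    print(g.all_nodes())  # [0, 1, 2]
    print(g.dfs(0, 2))    # [0, 1, 2]
    print(g.bfs(0))       # [0, 1, 2]
    print(g.has_cycle())  # True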
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


def viz_polynomial():
    """Plot the degree-4 polynomial-regression fit against the raw data."""
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polynomial()

    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
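
# Illustrative extension (not in the original script): the train/test split
# created above is never used, so one way to score the degree-4 fit on the
# held-out data would be (r2_score is assumed from scikit-learn):
#
#   from sklearn.metrics import r2_score
#
#   pol_reg_train = LinearRegression().fit(poly_reg.fit_transform(X_train), y_train)
#   print(r2_score(y_test, pol_reg_train.predict(poly_reg.fit_transform(X_test))))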
def binary_or(a: int, b: int) -> str:
    """Return the binary string of the bitwise OR of two non-negative integers."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
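    # Illustrative example (added): 25 = 0b11001 and 32 = 0b100000, so their OR is 0b111001.
    print(binary_or(25, 32))  # -> 0b111001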
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset
BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir


class TestAll(TestCasePlus):
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    @slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = Seq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=max_src_len,
            max_target_length=max_tgt_len,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch
    @parameterized.expand([BART_TINY, BERT_BASE_CASED])
    def test_legacy_dataset_truncation(self, tok):
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=20,
            max_target_length=trunc_target,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch
    def test_pack_dataset(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")

        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath("train.source").open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath("train.source").open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" )
def lowercase__ ( self : Optional[int] ):
"""simple docstring"""
if not FAIRSEQ_AVAILABLE:
return
lowercase_ :Any = self._get_dataset(max_len=64 )
lowercase_ :Tuple = 64
lowercase_ :Dict = ds.make_dynamic_sampler(lowercase , required_batch_size_multiple=lowercase )
lowercase_ :List[str] = [len(lowercase ) for x in batch_sampler]
assert len(set(lowercase ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(lowercase ) == len(lowercase ) # no dropped or added examples
lowercase_ :Dict = DataLoader(lowercase , batch_sampler=lowercase , collate_fn=ds.collate_fn , num_workers=2 )
lowercase_ :int = []
lowercase_ :Optional[int] = []
for batch in data_loader:
lowercase_ :List[Any] = batch["input_ids"].shape
lowercase_ :Dict = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
lowercase_ :Optional[Any] = np.product(batch["input_ids"].shape )
num_src_per_batch.append(lowercase )
if num_src_tokens > (max_tokens * 1.1):
failures.append(lowercase )
assert num_src_per_batch[0] == max(lowercase )
if failures:
raise AssertionError(F'too many tokens in {len(lowercase )} batches' )
    def test_sortish_sampler_reduces_padding(self):
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)

        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)

        pad = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k="input_ids"):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels"))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)
    def _get_dataset(self, n_obs=1_000, max_len=128):
        if os.getenv("USE_REAL_DATA", False):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath("train.len").exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)

        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = Seq2SeqDataset(
            tokenizer,
            data_dir=data_dir,
            type_path="train",
            max_source_length=max_len,
            max_target_length=max_len,
            n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer
    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    def test_dataset_kwargs(self, tok_name):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = SeqaSeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
                src_lang="EN",
                tgt_lang="FR",
            )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = SeqaSeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
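# The two sampler tests above rest on one idea: grouping examples of similar length
# before batching shrinks per-batch padding. A pure-Python sketch of that effect
# (no torch/fairseq; the lengths below are illustrative only):
_lengths = [3, 17, 4, 18, 2, 16]
_naive_batches = [_lengths[i : i + 2] for i in range(0, len(_lengths), 2)]
_sorted_batches = [sorted(_lengths)[i : i + 2] for i in range(0, len(_lengths), 2)]


def _wasted_pad(batches):
    # pad tokens spent if each batch pads to its longest member
    return sum(max(b) * len(b) - sum(b) for b in batches)


assert _wasted_pad(_sorted_batches) < _wasted_pad(_naive_batches)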
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]
    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8_000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )
    def _np_extract_fbank_features(self, waveform: np.array):
        """simple docstring"""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0):
        """simple docstring"""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ):
        """simple docstring"""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        batched_speech = BatchFeature({"input_features": raw_speech})
        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )
        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)
        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)
        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]
        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        """simple docstring"""
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
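# A minimal usage sketch of the extractor above (the shapes follow from the defaults:
# 80 mel bins x 3000 frames for 30 s of audio at 16 kHz; the instantiation below is
# illustrative only and assumes the surrounding package context is importable):
#
#   extractor = WhisperFeatureExtractor()
#   waveform = np.zeros(16_000, dtype=np.float32)      # 1 s of silence
#   feats = extractor(waveform, sampling_rate=16_000, return_tensors="np")
#   feats["input_features"].shape                      # (1, 80, 3000) after padding to 30 s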
"""simple docstring"""
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
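# The same four filters on a toy list, as a quick illustration (names are made up):
#
#   sample = ["dir/ok_file.py", "Dir/Upper.py", "dir/has space.py", "dir/has-hyphen.py", "rootfile.py"]
#   [f for f in sample if f != f.lower()]   # ['Dir/Upper.py']
#   [f for f in sample if " " in f]         # ['dir/has space.py']
#   [f for f in sample if "-" in f]         # ['dir/has-hyphen.py']
#   [f for f in sample if os.sep not in f]  # ['rootfile.py']  (on POSIX)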
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/conditional-detr-resnet-50""": (
"""https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"""
),
}
class ConditionalDetrConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig(OnnxConfig):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
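# Usage sketch for the config above (values are illustrative; `attribute_map` is what
# routes `hidden_size` to `d_model` and `num_attention_heads` to `encoder_attention_heads`):
#
#   config = ConditionalDetrConfig(num_queries=100, d_model=256)
#   config.hidden_size            # 256, resolved through attribute_map
#   config.num_attention_heads    # 8, i.e. encoder_attention_heads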
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(
        self,
        parent,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = TFDistilBertModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFDistilBertModel,
            'fill-mask': TFDistilBertForMaskedLM,
            'question-answering': TFDistilBertForQuestionAnswering,
            'text-classification': TFDistilBertForSequenceClassification,
            'token-classification': TFDistilBertForTokenClassification,
            'zero-shot': TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        '''simple docstring'''
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        '''simple docstring'''
        model = TFDistilBertModel.from_pretrained('distilbert-base-uncased')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
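# Note on the pattern above: integration tests compare only a small slice of the output
# against hard-coded values, because full tensors are large and tiny numeric drift is
# expected; `atol=1e-4` bounds that drift. A sketch of adapting it to another model
# (names here are illustrative):
#
#   output = some_tf_model(tf.constant([[101, 2054, 102]]))[0]
#   tf.debugging.assert_near(output[:, :3, :3], expected_3x3_slice, atol=1e-4)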
'''simple docstring'''
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

headers = {"UserAgent": UserAgent().random}
def extract_user_profile(script) -> dict:
    """simple docstring"""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    def __init__(self, username):
        '''simple docstring'''
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        '''simple docstring'''
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, 'html.parser').find_all('script')
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])
def __repr__( self : str ) -> str:
'''simple docstring'''
return f"{self.__class__.__name__}('{self.username}')"
def __str__( self : Optional[int] ) -> str:
'''simple docstring'''
return f"{self.fullname} ({self.username}) is {self.biography}"
    @property
    def username(self) -> str:
        '''simple docstring'''
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        '''simple docstring'''
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        '''simple docstring'''
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        '''simple docstring'''
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        '''simple docstring'''
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        '''simple docstring'''
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        '''simple docstring'''
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        '''simple docstring'''
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        '''simple docstring'''
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        '''simple docstring'''
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        '''simple docstring'''
        return self.user_data["is_private"]
def test_instagram_user(username: str = "github") -> None:
    """simple docstring"""
    import os

    if os.environ.get('CI'):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_50
assert instagram_user.number_of_followers > 12_00_00
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser("github")
print(instagram_user)
print(f'''{instagram_user.number_of_posts = }''')
print(f'''{instagram_user.number_of_followers = }''')
print(f'''{instagram_user.number_of_followings = }''')
print(f'''{instagram_user.email = }''')
print(f'''{instagram_user.website = }''')
print(f'''{instagram_user.profile_picture_url = }''')
print(f'''{instagram_user.is_verified = }''')
print(f'''{instagram_user.is_private = }''')
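# extract_user_profile() depends on Instagram embedding a JSON blob inside a <script>
# tag; the slicing below mirrors that logic on a synthetic payload (offline sketch):
#
#   sample = 'window._sharedData = {"config": {}, "entry_data": {"ProfilePage": [{"graphql": {"user": {"username": "github"}}}]}};'
#   parsed = json.loads(sample[sample.find('{"config"') : -1])
#   parsed["entry_data"]["ProfilePage"][0]["graphql"]["user"]["username"]  # 'github'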
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
    def setUp(self):
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model')
        processor = BlipaProcessor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        '''simple docstring'''
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        '''simple docstring'''
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        '''simple docstring'''
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        '''simple docstring'''
        processor = BlipaProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipaProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
    def test_image_processor(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['pixel_values', 'input_ids', 'attention_mask'])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ['pixel_values', 'input_ids', 'attention_mask'])
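# End-to-end sketch of the contract these tests pin down (requires network access for
# the tiny checkpoint; names mirror the fixtures above):
#
#   processor = BlipaProcessor(BlipImageProcessor(),
#                              GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model"))
#   inputs = processor(text="lower newer", images=Image.new("RGB", (400, 30)))
#   sorted(inputs.keys())  # ['attention_mask', 'input_ids', 'pixel_values']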
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        '''simple docstring'''
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        '''simple docstring'''
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        '''simple docstring'''
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        '''simple docstring'''
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2))
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2))

    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        '''simple docstring'''
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(__UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(__UpperCAmelCase )
__UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase = [*signature.parameters.keys()]
__UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(__UpperCAmelCase ), BeitForMaskedImageModeling]:
continue
__UpperCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__UpperCamelCase = model(**__UpperCAmelCase ).loss
loss.backward()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__UpperCamelCase = False
__UpperCamelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(__UpperCAmelCase ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
__UpperCamelCase = model_class(__UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(__UpperCAmelCase )
model.train()
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__UpperCamelCase = model(**__UpperCAmelCase ).loss
loss.backward()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase = _config_zero_init(__UpperCAmelCase )
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(config=__UpperCAmelCase )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = BeitModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def A ( ) -> int:
__UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def UpperCAmelCase ( self ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(__UpperCAmelCase )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).pixel_values.to(__UpperCAmelCase )
# prepare bool_masked_pos
__UpperCamelCase = torch.ones((1, 196) , dtype=torch.bool ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(pixel_values=__UpperCAmelCase , bool_masked_pos=__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor(
[[-3.2_4_3_7, 0.5_0_7_2, -1_3.9_1_7_4], [-3.2_4_5_6, 0.4_9_4_8, -1_3.9_4_0_1], [-3.2_0_3_3, 0.5_1_2_1, -1_3.8_5_5_0]] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __UpperCAmelCase , atol=1E-2 ) )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(__UpperCAmelCase )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 1000) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
__UpperCamelCase = 281
self.assertEqual(logits.argmax(-1 ).item() , __UpperCAmelCase )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
__UpperCAmelCase )
__UpperCamelCase = self.default_image_processor
__UpperCamelCase = prepare_img()
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 2_1841) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = torch.tensor([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
__UpperCamelCase = 2396
self.assertEqual(logits.argmax(-1 ).item() , __UpperCAmelCase )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
__UpperCamelCase = model.to(__UpperCAmelCase )
__UpperCamelCase = BeitImageProcessor(do_resize=__UpperCAmelCase , size=640 , do_center_crop=__UpperCAmelCase )
__UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
__UpperCamelCase = Image.open(ds[0]['file'] )
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits
# verify the logits
__UpperCamelCase = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , __UpperCAmelCase )
__UpperCamelCase = version.parse(PIL.__version__ ) < version.parse('9.0.0' )
if is_pillow_less_than_a:
__UpperCamelCase = torch.tensor(
[
[[-4.9_2_2_5, -2.3_9_5_4, -3.0_5_2_2], [-2.8_8_2_2, -1.0_0_4_6, -1.7_5_6_1], [-2.9_5_4_9, -1.3_2_2_8, -2.1_3_4_7]],
[[-5.8_1_6_8, -3.4_1_2_9, -4.0_7_7_8], [-3.8_6_5_1, -2.2_2_1_4, -3.0_2_7_7], [-3.8_3_5_6, -2.4_6_4_3, -3.3_5_3_5]],
[[-0.0_0_7_8, 3.9_9_5_2, 4.0_7_5_4], [2.9_8_5_6, 4.6_9_4_4, 5.0_0_3_5], [3.2_4_1_3, 4.7_8_1_3, 4.9_9_6_9]],
] , device=__UpperCAmelCase , )
else:
__UpperCamelCase = torch.tensor(
[
[[-4.8_9_6_0, -2.3_6_8_8, -3.0_3_5_5], [-2.8_4_7_8, -0.9_8_3_6, -1.7_4_1_8], [-2.9_4_4_9, -1.3_3_3_2, -2.1_4_5_6]],
[[-5.8_0_8_1, -3.4_1_2_4, -4.1_0_0_6], [-3.8_5_6_1, -2.2_0_8_1, -3.0_3_2_3], [-3.8_3_6_5, -2.4_6_0_1, -3.3_6_6_9]],
[[-0.0_3_0_9, 3.9_8_6_8, 4.0_5_4_0], [2.9_6_4_0, 4.6_8_7_7, 4.9_9_7_6], [3.2_0_8_1, 4.7_6_9_0, 4.9_9_4_2]],
] , device=__UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
__UpperCamelCase = model.to(__UpperCAmelCase )
__UpperCamelCase = BeitImageProcessor(do_resize=__UpperCAmelCase , size=640 , do_center_crop=__UpperCAmelCase )
__UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
__UpperCamelCase = Image.open(ds[0]['file'] )
__UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__UpperCAmelCase )
__UpperCamelCase = outputs.logits.detach().cpu()
__UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase , target_sizes=[(500, 300)] )
__UpperCamelCase = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
__UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase )
__UpperCamelCase = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("Bangalore"), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
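# Consumption sketch (live network call against Indeed's current markup, so left
# commented; fetch_jobs yields (job_title, company_name) tuples lazily):
#
#   from itertools import islice
#   for title, company in islice(fetch_jobs("Pune"), 5):
#       print(f"{title} @ {company}")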
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}")
        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)
    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )
    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu
    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
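# Usage sketch (requires torch; the constructor arguments shown are illustrative
# fields inherited from BenchmarkArguments):
#
#   args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128])
#   args.device    # resolved lazily through the cached `_setup_devices` property
#   args.n_gpu     # 0 on CPU-only machines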
'''simple docstring'''
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    '''simple docstring'''
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))
        self.keep_order = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)
            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden
                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i
                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)
        return out
def _A ( self : Optional[int] , A : str ):
if self.n_clusters == 0:
_UpperCAmelCase : List[str] = self._compute_logit(A , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(A , dim=-1 )
else:
# construct weights and biases
_UpperCAmelCase , _UpperCAmelCase : List[Any] = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_UpperCAmelCase : Union[str, Any] = self.out_layers[0].weight[l_idx:r_idx]
_UpperCAmelCase : List[Any] = self.out_layers[0].bias[l_idx:r_idx]
else:
_UpperCAmelCase : int = self.out_layers[i].weight
_UpperCAmelCase : List[str] = self.out_layers[i].bias
if i == 0:
_UpperCAmelCase : Tuple = torch.cat([weight_i, self.cluster_weight] , dim=0 )
_UpperCAmelCase : Any = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(A )
biases.append(A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[int] = weights[0], biases[0], self.out_projs[0]
_UpperCAmelCase : Optional[Any] = self._compute_logit(A , A , A , A )
_UpperCAmelCase : Union[str, Any] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
_UpperCAmelCase : Any = nn.functional.log_softmax(A , dim=1 )
_UpperCAmelCase : Optional[Any] = [0] + self.cutoffs
for i in range(len(A ) - 1 ):
_UpperCAmelCase , _UpperCAmelCase : List[str] = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
_UpperCAmelCase : str = head_logprob[:, : self.cutoffs[0]]
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Tuple = weights[i], biases[i], self.out_projs[i]
_UpperCAmelCase : int = self._compute_logit(A , A , A , A )
_UpperCAmelCase : List[str] = nn.functional.log_softmax(A , dim=1 )
_UpperCAmelCase : Optional[Any] = head_logprob[:, -i] + tail_logprob_i
_UpperCAmelCase : Any = logprob_i
return out
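# A minimal, self-contained sketch of the adaptive-softmax identity used above,
# with hypothetical sizes (4 frequent tokens plus one tail-cluster slot in the
# head, 3 rare tokens in the tail): a tail token's log-probability is the
# cluster's head log-probability plus its log-probability inside the tail softmax.
import torch
from torch import nn
head_logprob_demo = nn.functional.log_softmax(torch.randn(2 , 5 ) , dim=1 )
tail_logprob_demo = nn.functional.log_softmax(torch.randn(2 , 3 ) , dim=1 )
# log P(rare token) = log P(tail cluster) + log P(token | tail cluster)
rare_logprob_demo = head_logprob_demo[:, -1, None] + tail_logprob_demo
full_logprob_demo = torch.cat([head_logprob_demo[:, :4], rare_logprob_demo] , dim=1 )
# The combined distribution over all 4 + 3 tokens still sums to one.
assert torch.allclose(full_logprob_demo.exp().sum(dim=1 ) , torch.ones(2 ) )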
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
def lowercase ( self : Any ):
_snake_case = tempfile.mkdtemp()
# fmt: off
_snake_case = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
_snake_case = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
_snake_case = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
_snake_case = {'''unk_token''': '''<unk>'''}
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_lowerCamelCase ) )
_snake_case = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
_snake_case = os.path.join(self.tmpdirname , _lowerCamelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
def lowercase ( self : Tuple , **_lowerCamelCase : Any ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def lowercase ( self : str , **_lowerCamelCase : Any ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def lowercase ( self : int , **_lowerCamelCase : Optional[int] ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def lowercase ( self : Union[str, Any] ):
shutil.rmtree(self.tmpdirname )
def lowercase ( self : Any ):
        _snake_case = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
_snake_case = [Image.fromarray(np.moveaxis(_lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase ( self : Optional[Any] ):
_snake_case = self.get_tokenizer()
_snake_case = self.get_rust_tokenizer()
_snake_case = self.get_image_processor()
_snake_case = CLIPProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
_snake_case = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCamelCase )
_snake_case = CLIPProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
_snake_case = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , _lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , _lowerCamelCase )
def lowercase ( self : List[Any] ):
_snake_case = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_snake_case = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_snake_case = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 )
_snake_case = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCamelCase )
def lowercase ( self : int ):
_snake_case = self.get_image_processor()
_snake_case = self.get_tokenizer()
_snake_case = CLIPProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
_snake_case = self.prepare_image_inputs()
_snake_case = image_processor(_lowerCamelCase , return_tensors='''np''' )
_snake_case = processor(images=_lowerCamelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase ( self : Any ):
_snake_case = self.get_image_processor()
_snake_case = self.get_tokenizer()
_snake_case = CLIPProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
_snake_case = '''lower newer'''
_snake_case = processor(text=_lowerCamelCase )
_snake_case = tokenizer(_lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase ( self : Any ):
_snake_case = self.get_image_processor()
_snake_case = self.get_tokenizer()
_snake_case = CLIPProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
_snake_case = '''lower newer'''
_snake_case = self.prepare_image_inputs()
_snake_case = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def lowercase ( self : List[str] ):
_snake_case = self.get_image_processor()
_snake_case = self.get_tokenizer()
_snake_case = CLIPProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
_snake_case = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_snake_case = processor.batch_decode(_lowerCamelCase )
_snake_case = tokenizer.batch_decode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
def lowercase ( self : List[Any] ):
_snake_case = self.get_image_processor()
_snake_case = self.get_tokenizer()
_snake_case = CLIPProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
_snake_case = '''lower newer'''
_snake_case = self.prepare_image_inputs()
_snake_case = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
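# A minimal usage sketch of the processor the tests above exercise (the
# checkpoint name is an example and needs Hub access, so it stays commented):
#
#   from transformers import CLIPProcessor
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["lower newer"], images=image, return_tensors="pt")
#   # -> dict with "input_ids", "attention_mask" and "pixel_values"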
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_abit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__snake_case : Union[str, Any] = logging.getLogger(__name__)
def __lowerCamelCase ( __snake_case : torch.nn.Module, __snake_case : BnbQuantizationConfig, __snake_case : Union[str, os.PathLike] = None, __snake_case : Optional[Dict[str, Union[int, str, torch.device]]] = None, __snake_case : Optional[List[str]] = None, __snake_case : Optional[Dict[Union[int, str], Union[int, str]]] = None, __snake_case : Optional[Union[str, os.PathLike]] = None, __snake_case : bool = False, ) -> Optional[Any]:
"""simple docstring"""
A__ : int =bnb_quantization_config.load_in_abit
A__ : List[Any] =bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
"""make sure you have the latest version of `bitsandbytes` installed.""" )
A__ : Optional[Any] =[]
# custom device map
if isinstance(A__, A__ ) and len(device_map.keys() ) > 1:
A__ : str =[key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
A__ : Any =get_keys_to_not_convert(A__ )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(A__ )
A__ : str =bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
A__ : Any =[]
A__ : Optional[Any] =bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(A__ )
# compatibility with peft
A__ : List[str] =load_in_abit
A__ : Tuple =load_in_abit
A__ : Dict =get_parameter_device(A__ )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
A__ : List[str] =replace_with_bnb_layers(A__, A__, modules_to_not_convert=A__ )
# convert param to the right dtype
A__ : Optional[int] =bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
            param.to(torch.float32 )
            if param.dtype != torch.float32:
                A__ : int =name.replace(""".weight""", """""" ).replace(""".bias""", """""" )
                A__ : Optional[Any] =getattr(A__, A__, A__ )
                if param is not None:
                    param.to(torch.float32 )
elif torch.is_floating_point(A__ ):
param.to(A__ )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
f"The model device type is {model_device.type}. However, cuda is needed for quantization."
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} " )
else:
with init_empty_weights():
A__ : str =replace_with_bnb_layers(
A__, A__, modules_to_not_convert=A__ )
A__ : List[str] =get_quantized_model_device_map(
A__, A__, A__, max_memory=A__, no_split_module_classes=A__, )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
A__ : Any =True
A__ : Union[str, Any] =any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
A__, A__, A__, dtype=bnb_quantization_config.torch_dtype, offload_folder=A__, offload_state_dict=A__, keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules, offload_abit_bnb=load_in_abit and offload, )
return dispatch_model(A__, device_map=A__, offload_dir=A__ )
def __lowerCamelCase ( __snake_case : Tuple, __snake_case : Any, __snake_case : Tuple=None, __snake_case : List[Any]=None, __snake_case : Any=None ) -> Tuple:
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
A__ : Dict ={"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(A__, A__ ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
A__ : List[str] ={}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
            name: torch.float32
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
A__ : Optional[int] ={}
A__ : Optional[Any] =special_dtypes
A__ : Any =no_split_module_classes
A__ : Any =bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
A__ : Any =get_balanced_memory(
A__, low_zero=(device_map == """balanced_low_0"""), max_memory=A__, **A__, )
A__ : List[str] =max_memory
A__ : Optional[int] =infer_auto_device_map(A__, **A__ )
if isinstance(A__, A__ ):
# check if don't have any quantized module on the cpu
A__ : int =bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
A__ : List[Any] ={
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
def __lowerCamelCase ( __snake_case : Dict, __snake_case : Optional[Any], __snake_case : Any=None, __snake_case : Any=None ) -> List[str]:
"""simple docstring"""
if modules_to_not_convert is None:
A__ : Optional[Any] =[]
A__ : List[str] =_replace_with_bnb_layers(
A__, A__, A__, A__ )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : List[str], __snake_case : Optional[Any]=None, __snake_case : Dict=None, ) -> Any:
"""simple docstring"""
A__ : int =False
for name, module in model.named_children():
if current_key_name is None:
A__ : List[str] =[]
current_key_name.append(A__ )
if isinstance(A__, nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
A__ : Tuple =""".""".join(A__ )
A__ : Any =True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
A__ : str =False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
A__ : Optional[Any] =bnb.nn.LinearabitLt(
module.in_features, module.out_features, module.bias is not None, has_fpaa_weights=A__, threshold=bnb_quantization_config.llm_inta_threshold, )
elif bnb_quantization_config.load_in_abit:
A__ : Dict =bnb.nn.Linearabit(
module.in_features, module.out_features, module.bias is not None, bnb_quantization_config.bnb_abit_compute_dtype, compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant, quant_type=bnb_quantization_config.bnb_abit_quant_type, )
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
A__ : Dict =module.weight.data
if module.bias is not None:
A__ : Tuple =module.bias.data
bnb_module.requires_grad_(A__ )
setattr(A__, A__, A__ )
A__ : Any =True
if len(list(module.children() ) ) > 0:
A__ : Optional[Any] =_replace_with_bnb_layers(
A__, A__, A__, A__ )
A__ : Optional[Any] =has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __lowerCamelCase ( __snake_case : List[str] ) -> Optional[Any]:
"""simple docstring"""
with init_empty_weights():
A__ : Dict =deepcopy(A__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
A__ : Tuple =find_tied_parameters(A__ )
# For compatibility with Accelerate < 0.18
if isinstance(A__, A__ ):
A__ : Union[str, Any] =sum(list(tied_params.values() ), [] ) + list(tied_params.keys() )
else:
A__ : Union[str, Any] =sum(A__, [] )
A__ : Optional[Any] =len(A__ ) > 0
# Check if it is a base model
A__ : List[Any] =False
if hasattr(A__, """base_model_prefix""" ):
A__ : Optional[Any] =not hasattr(A__, model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
A__ : str =list(model.named_children() )
A__ : List[str] =[list_modules[-1][0]]
# add last module together with tied weights
A__ : List[Any] =set(A__ ) - set(A__ )
A__ : List[str] =list(set(A__ ) ) + list(A__ )
# remove ".weight" from the keys
A__ : Optional[Any] =[""".weight""", """.bias"""]
A__ : Any =[]
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
A__ : int =name.replace(A__, """""" )
filtered_module_names.append(A__ )
return filtered_module_names
def __lowerCamelCase ( __snake_case : Optional[Any] ) -> str:
"""simple docstring"""
for m in model.modules():
if isinstance(A__, bnb.nn.Linearabit ):
return True
return False
def __lowerCamelCase ( __snake_case : nn.Module ) -> Union[str, Any]:
"""simple docstring"""
return next(parameter.parameters() ).device
def __lowerCamelCase ( __snake_case : List[Any], __snake_case : Optional[int], __snake_case : Union[str, Any], __snake_case : Dict, __snake_case : Union[str, Any], __snake_case : Optional[Any], __snake_case : Optional[int] ) -> int:
"""simple docstring"""
if fpaa_statistics is None:
set_module_tensor_to_device(A__, A__, 0, dtype=A__, value=A__ )
A__ : List[str] =param_name
A__ : int =model
if "." in tensor_name:
A__ : Optional[int] =tensor_name.split(""".""" )
for split in splits[:-1]:
A__ : str =getattr(A__, A__ )
if new_module is None:
raise ValueError(f"{module} has no attribute {split}." )
A__ : int =new_module
A__ : List[Any] =splits[-1]
# offload weights
A__ : Dict =False
offload_weight(module._parameters[tensor_name], A__, A__, index=A__ )
if hasattr(module._parameters[tensor_name], """SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB, param_name.replace("""weight""", """SCB""" ), A__, index=A__, )
else:
offload_weight(A__, A__, A__, index=A__ )
offload_weight(A__, param_name.replace("""weight""", """SCB""" ), A__, index=A__ )
set_module_tensor_to_device(A__, A__, """meta""", dtype=A__, value=torch.empty(*param.size() ) )
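# A minimal usage sketch, assuming accelerate's public `BnbQuantizationConfig` /
# `load_and_quantize_model` entry points wrap the helpers above (commented out
# because it needs a CUDA device and `bitsandbytes` installed):
#
#   from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True)
#   model = load_and_quantize_model(model, bnb_config, weights_location=checkpoint_dir)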
'''simple docstring'''
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def solution(num_picked: int = 20 ) -> str:
    """simple docstring"""
    total = math.comb(NUM_BALLS , num_picked )
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR , num_picked )
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"
if __name__ == "__main__":
print(solution(20))
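# Why the formula works: by linearity of expectation, the expected number of
# distinct colours equals NUM_COLOURS times the probability that one fixed
# colour appears at least once, i.e. 7 * (1 - comb(60, 20) / comb(70, 20)).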
'''simple docstring'''
import numpy as np
def power_iteration(
    input_matrix: np.ndarray ,
    vector: np.ndarray ,
    error_tol: float = 1e-12 ,
    max_iterations: int = 100 ,
) -> tuple[float, np.ndarray]:
    assert np.shape(input_matrix )[0] == np.shape(input_matrix )[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix )[0] == np.shape(vector )[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix ) == np.iscomplexobj(vector )
    is_complex = np.iscomplexobj(input_matrix )
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix , input_matrix.conj().T )
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply the matrix by the vector.
        w = np.dot(input_matrix , vector )
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w )
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h , np.dot(input_matrix , vector ) )
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous ) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_ )
    return lambda_, vector
def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
    real_vector = np.array([41, 4, 20] )
    complex_input_matrix = real_input_matrix.astype(np.complex128 )
    imag_matrix = np.triu(1J * complex_input_matrix , 1 )
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20] ).astype(np.complex128 )
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix , vector )
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix )
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector ) - np.abs(eigen_vector_max ) ) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
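# Note: power iteration converges only linearly, at a rate governed by the
# ratio |lambda_2 / lambda_1| of the two largest-magnitude eigenvalues, so it
# is slow whenever the dominant eigenvalue is not well separated.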
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : Optional[Any] = '▁'
a : List[Any] = {'vocab_file': 'sentencepiece.bpe.model'}
a : Optional[Any] = {
'vocab_file': {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model',
}
}
a : Any = {
'facebook/xglm-564M': 2_048,
}
class _a ( _lowerCAmelCase ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = ['''input_ids''', '''attention_mask''']
def __init__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="<unk>", SCREAMING_SNAKE_CASE_="<pad>", SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> None:
UpperCAmelCase_: Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
UpperCAmelCase_: Optional[int] = 7
UpperCAmelCase_: Dict = [f'<madeupword{i}>' for i in range(self.num_madeup_words )]
UpperCAmelCase_: List[Any] = kwargs.get("""additional_special_tokens""", [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, sp_model_kwargs=self.sp_model_kwargs, **SCREAMING_SNAKE_CASE_, )
UpperCAmelCase_: Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE_ ) )
UpperCAmelCase_: List[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCAmelCase_: Dict = 1
# Mimic fairseq token-to-id alignment for the first 4 token
UpperCAmelCase_: Any = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
UpperCAmelCase_: Union[str, Any] = len(self.sp_model )
UpperCAmelCase_: Optional[int] = {f'<madeupword{i}>': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__(self ) -> Any:
UpperCAmelCase_: List[Any] = self.__dict__.copy()
UpperCAmelCase_: List[Any] = None
UpperCAmelCase_: Union[str, Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__(self, SCREAMING_SNAKE_CASE_ ) -> Dict:
UpperCAmelCase_: List[Any] = d
# for backward compatibility
if not hasattr(self, """sp_model_kwargs""" ):
UpperCAmelCase_: int = {}
UpperCAmelCase_: List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
UpperCAmelCase_: List[str] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_, token_ids_a=SCREAMING_SNAKE_CASE_, already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ ))
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ ))
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
UpperCAmelCase_: str = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def __snake_case (self ) -> Tuple:
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def __snake_case (self ) -> Optional[int]:
UpperCAmelCase_: Tuple = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> List[str]:
return self.sp_model.encode(SCREAMING_SNAKE_CASE_, out_type=SCREAMING_SNAKE_CASE_ )
def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> int:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCAmelCase_: str = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> List[Any]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
        out_string = """""".join(SCREAMING_SNAKE_CASE_ ).replace("""▁""", """ """ ).strip()
        return out_string
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
UpperCAmelCase_: List[Any] = os.path.join(
SCREAMING_SNAKE_CASE_, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, SCREAMING_SNAKE_CASE_ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE_, """wb""" ) as fi:
UpperCAmelCase_: Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE_ )
return (out_vocab_file,)
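# A minimal usage sketch, assuming the class above is transformers'
# XGLMTokenizer (commented out because it downloads the sentencepiece model):
#
#   tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
#   ids = tokenizer("Hello world").input_ids  # </s> is prepended, as shown above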
from __future__ import annotations
def rec_insertion_sort(collection: list , n: int ) -> None:
    '''simple docstring'''
    if len(collection ) <= 1 or n <= 1:
        return
    insert_next(collection , n - 1 )
    rec_insertion_sort(collection , n - 1 )
def insert_next(collection: list , index: int ) -> None:
    '''simple docstring'''
    if index >= len(collection ) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection , index + 1 )
if __name__ == "__main__":
    numbers = input("""Enter integers separated by spaces: """)
    number_list = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
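# Example: rec_insertion_sort([3, 1, 2], 3) sorts the list in place to [1, 2, 3].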
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
__snake_case = False
try:
__snake_case = _is_package_available("""google.colab""")
except ModuleNotFoundError:
pass
@input.register
class lowercase__ :
def __init__( self : Optional[Any] , UpperCAmelCase_ : str = None , UpperCAmelCase_ : list = [] ):
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = choices
SCREAMING_SNAKE_CASE__ = prompt
if sys.platform == "win32":
SCREAMING_SNAKE_CASE__ = '*'
else:
SCREAMING_SNAKE_CASE__ = '➔ '
def A_ ( self : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str = "" ):
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , UpperCAmelCase_ )
else:
forceWrite(self.choices[index] , UpperCAmelCase_ )
def A_ ( self : List[str] , UpperCAmelCase_ : int ):
if index == self.position:
forceWrite(F' {self.arrow_char} ' )
self.write_choice(UpperCAmelCase_ )
else:
forceWrite(F' {self.choices[index]}' )
reset_cursor()
def A_ ( self : List[Any] , UpperCAmelCase_ : Direction , UpperCAmelCase_ : int = 1 ):
SCREAMING_SNAKE_CASE__ = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(UpperCAmelCase_ )
move_cursor(UpperCAmelCase_ , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP['up'] )
def A_ ( self : Optional[Any] ):
self.move_direction(Direction.UP )
@input.mark(KEYMAP['down'] )
def A_ ( self : List[Any] ):
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP['newline'] )
def A_ ( self : Dict ):
move_cursor(len(self.choices ) - self.position , 'DOWN' )
return self.position
@input.mark(KEYMAP['interrupt'] )
def A_ ( self : Optional[Any] ):
move_cursor(len(self.choices ) - self.position , 'DOWN' )
raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number )] for number in range(10 )] )
def A_ ( self : Dict ):
SCREAMING_SNAKE_CASE__ = int(chr(self.current_selection ) )
SCREAMING_SNAKE_CASE__ = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , UpperCAmelCase_ )
else:
return
else:
return
def A_ ( self : Optional[int] , UpperCAmelCase_ : int = 0 ):
if self.prompt:
linebreak()
forceWrite(self.prompt , '\n' )
if in_colab:
forceWrite('Please input a choice index (starting from 0), and press enter' , '\n' )
else:
forceWrite('Please select a choice using the arrow or number keys, and selecting with enter' , '\n' )
SCREAMING_SNAKE_CASE__ = default_choice
for i in range(len(self.choices ) ):
self.print_choice(UpperCAmelCase_ )
forceWrite('\n' )
move_cursor(len(self.choices ) - self.position , 'UP' )
with cursor.hide():
while True:
if in_colab:
try:
SCREAMING_SNAKE_CASE__ = int(builtins.input() )
except ValueError:
SCREAMING_SNAKE_CASE__ = default_choice
else:
SCREAMING_SNAKE_CASE__ = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , 'UP' )
clear_line()
self.write_choice(UpperCAmelCase_ , '\n' )
return choice
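# A minimal usage sketch; the class name is obfuscated above, so `BulletMenu`
# (its name in accelerate) is assumed here. Interactive, hence commented out:
#
#   menu = BulletMenu("Pick a backend:", ["cpu", "cuda", "mps"])
#   choice_index = menu.run(default_choice=0)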
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : int = logging.get_logger(__name__)
a : Optional[Any] = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class a ( lowercase__ ):
"""simple docstring"""
a : Dict = 'yolos'
def __init__( self : int , __lowercase : Optional[Any]=768 , __lowercase : List[str]=12 , __lowercase : List[Any]=12 , __lowercase : Optional[Any]=3072 , __lowercase : Any="gelu" , __lowercase : List[Any]=0.0 , __lowercase : Tuple=0.0 , __lowercase : List[str]=0.02 , __lowercase : Any=1e-1_2 , __lowercase : Optional[Any]=[512, 864] , __lowercase : str=16 , __lowercase : Optional[Any]=3 , __lowercase : int=True , __lowercase : Optional[Any]=100 , __lowercase : str=True , __lowercase : str=False , __lowercase : Dict=1 , __lowercase : List[str]=5 , __lowercase : Dict=2 , __lowercase : Optional[Any]=5 , __lowercase : int=2 , __lowercase : List[Any]=0.1 , **__lowercase : Optional[int] , ) -> Union[str, Any]:
super().__init__(**__lowercase )
__UpperCAmelCase : List[str] = hidden_size
__UpperCAmelCase : Optional[int] = num_hidden_layers
__UpperCAmelCase : Union[str, Any] = num_attention_heads
__UpperCAmelCase : List[Any] = intermediate_size
__UpperCAmelCase : List[str] = hidden_act
__UpperCAmelCase : List[Any] = hidden_dropout_prob
__UpperCAmelCase : int = attention_probs_dropout_prob
__UpperCAmelCase : Tuple = initializer_range
__UpperCAmelCase : Union[str, Any] = layer_norm_eps
__UpperCAmelCase : Optional[Any] = image_size
__UpperCAmelCase : Union[str, Any] = patch_size
__UpperCAmelCase : List[Any] = num_channels
__UpperCAmelCase : List[str] = qkv_bias
__UpperCAmelCase : str = num_detection_tokens
__UpperCAmelCase : int = use_mid_position_embeddings
__UpperCAmelCase : Optional[Any] = auxiliary_loss
# Hungarian matcher
__UpperCAmelCase : Dict = class_cost
__UpperCAmelCase : Any = bbox_cost
__UpperCAmelCase : List[str] = giou_cost
# Loss coefficients
__UpperCAmelCase : Dict = bbox_loss_coefficient
__UpperCAmelCase : List[str] = giou_loss_coefficient
__UpperCAmelCase : Union[str, Any] = eos_coefficient
class a ( lowercase__ ):
"""simple docstring"""
a : Tuple = version.parse('1.11' )
@property
def UpperCAmelCase ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCAmelCase ( self : Optional[Any] ) -> float:
return 1e-4
@property
def UpperCAmelCase ( self : str ) -> int:
return 12
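# A minimal usage sketch, assuming the classes above are transformers'
# YolosConfig / YolosOnnxConfig:
#
#   from transformers import YolosConfig, YolosForObjectDetection
#   config = YolosConfig(num_detection_tokens=100)
#   model = YolosForObjectDetection(config)  # randomly initialised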
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance(vector_1: Vector , vector_2: Vector ) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1 ) - np.asarray(vector_2 )) ** 2 ) )
def euclidean_distance_no_np(vector_1: Vector , vector_2: Vector ) -> VectorOut:
    return sum((va - vb) ** 2 for va, vb in zip(vector_1 , vector_2 ) ) ** (1 / 2)
if __name__ == "__main__":
    def benchmark():
from timeit import timeit
print("""Without Numpy""" )
print(
timeit(
"""euclidean_distance_no_np([1, 2, 3], [4, 5, 6])""" , number=10000 , globals=globals() , ) )
print("""With Numpy""" )
print(
timeit(
"""euclidean_distance([1, 2, 3], [4, 5, 6])""" , number=10000 , globals=globals() , ) )
benchmark()
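# Example: euclidean_distance([1, 2, 3], [4, 5, 6]) == sqrt(27) == 5.196152422706632,
# and both implementations agree; the numpy version is faster on large vectors.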
"""simple docstring"""
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a , _a = True , _a = False ):
__a = scheduler
__a = optimizers if isinstance(_a , (list, tuple) ) else [optimizers]
__a = split_batches
__a = step_with_optimizer
__a = GradientState()
def __UpperCAmelCase ( self , *_a , **_a ):
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*_a , **_a )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*_a , **_a )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
__a = AcceleratorState().num_processes
for _ in range(_a ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , '''total_steps''' ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*_a , **_a )
else:
self.scheduler.step(*_a , **_a )
def __UpperCAmelCase ( self ):
return self.scheduler.get_last_lr()
def __UpperCAmelCase ( self ):
return self.scheduler.state_dict()
def __UpperCAmelCase ( self , _a ):
self.scheduler.load_state_dict(_a )
def __UpperCAmelCase ( self ):
return self.scheduler.get_lr()
def __UpperCAmelCase ( self , *_a , **_a ):
return self.scheduler.print_lr(*_a , **_a )
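# A minimal usage sketch; the wrapper's name is obfuscated above, so
# `AcceleratedScheduler` (its name in accelerate) is assumed. In practice
# `Accelerator.prepare()` performs this wrapping for you:
#
#   scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=1e-3, total_steps=100)
#   scheduler = AcceleratedScheduler(scheduler, optimizer, step_with_optimizer=True)
#   scheduler.step()  # only advances when the wrapped optimizer really stepped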
"""simple docstring"""
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowercase_ = logging.get_logger(__name__)
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , **_a ):
super().__init__(**_a )
if self.framework != "pt":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
# No specific FOR_XXX available yet
def __call__( self , _a , **_a ):
return super().__call__(_a , **_a )
def __UpperCAmelCase ( self , **_a ):
__a = {}
if "candidate_labels" in kwargs:
__a = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
__a = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def __UpperCAmelCase ( self , _a , _a=None , _a="This is a sound of {}." ):
if isinstance(_a , _a ):
if audio.startswith('''http://''' ) or audio.startswith('''https://''' ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
__a = requests.get(_a ).content
else:
with open(_a , '''rb''' ) as f:
__a = f.read()
if isinstance(_a , _a ):
__a = ffmpeg_read(_a , self.feature_extractor.sampling_rate )
if not isinstance(_a , np.ndarray ):
raise ValueError('''We expect a numpy ndarray as input''' )
if len(audio.shape ) != 1:
raise ValueError('''We expect a single channel audio input for ZeroShotAudioClassificationPipeline''' )
__a = self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors='''pt''' )
__a = candidate_labels
__a = [hypothesis_template.format(_a ) for x in candidate_labels]
__a = self.tokenizer(_a , return_tensors=self.framework , padding=_a )
__a = [text_inputs]
return inputs
def __UpperCAmelCase ( self , _a ):
__a = model_inputs.pop('''candidate_labels''' )
__a = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , _a ):
__a = text_inputs[0]
else:
# Batching case.
__a = text_inputs[0][0]
__a = self.model(**_a , **_a )
__a = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_audio,
}
return model_outputs
def __UpperCAmelCase ( self , _a ):
__a = model_outputs.pop('''candidate_labels''' )
__a = model_outputs['''logits'''][0]
if self.framework == "pt":
__a = logits.softmax(dim=0 )
__a = probs.tolist()
else:
raise ValueError('''`tf` framework not supported.''' )
__a = [
{'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(_a , _a ) , key=lambda _a : -_a[0] )
]
return result
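# A minimal usage sketch via the pipeline factory (the checkpoint is an
# example; commented out because it downloads a model):
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   classifier("dog_bark.wav", candidate_labels=["dog barking", "vacuum cleaner"])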
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Tuple , __magic_name__ : List[Any] ) -> str:
"""simple docstring"""
UpperCamelCase :int = set()
UpperCamelCase :Optional[Any] = []
def parse_line(__magic_name__ : Any ):
for line in fp:
if isinstance(__magic_name__ , __magic_name__ ):
UpperCamelCase :Tuple = line.decode("""UTF-8""" )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(""" """ ):
# process a single warning and move it to `selected_warnings`.
if len(__magic_name__ ) > 0:
UpperCamelCase :List[Any] = """\n""".join(__magic_name__ )
# Only keep the warnings specified in `targets`
if any(f""": {x}: """ in warning for x in targets ):
selected_warnings.add(__magic_name__ )
buffer.clear()
continue
else:
UpperCamelCase :Any = line.strip()
buffer.append(__magic_name__ )
if from_gh:
for filename in os.listdir(__magic_name__ ):
UpperCamelCase :Tuple = os.path.join(__magic_name__ , __magic_name__ )
if not os.path.isdir(__magic_name__ ):
# read the file
if filename != "warnings.txt":
continue
with open(__magic_name__ ) as fp:
parse_line(__magic_name__ )
else:
try:
with zipfile.ZipFile(__magic_name__ ) as z:
for filename in z.namelist():
if not os.path.isdir(__magic_name__ ):
# read the file
if filename != "warnings.txt":
continue
with z.open(__magic_name__ ) as fp:
parse_line(__magic_name__ )
except Exception:
logger.warning(
f"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""" )
return selected_warnings
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : str , __magic_name__ : List[str] ) -> Dict:
"""simple docstring"""
UpperCamelCase :Optional[int] = set()
UpperCamelCase :Optional[int] = [os.path.join(__magic_name__ , __magic_name__ ) for p in os.listdir(__magic_name__ ) if (p.endswith(""".zip""" ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(__magic_name__ , __magic_name__ ) )
return selected_warnings
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
return values.split(""",""" )
UpperCAmelCase_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
# optional parameters
parser.add_argument(
'''--targets''',
default='''DeprecationWarning,UserWarning,FutureWarning''',
type=list_str,
help='''Comma-separated list of target warning(s) which we want to extract.''',
)
parser.add_argument(
'''--from_gh''',
action='''store_true''',
help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
UpperCAmelCase_ : List[str] = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
UpperCAmelCase_ : List[str] = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('''=''' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
UpperCAmelCase_ : Any = extract_warnings(args.output_dir, args.targets)
UpperCAmelCase_ : str = sorted(selected_warnings)
with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
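# Example invocation, assuming this file is saved as `extract_warnings.py`
# (the token needs `actions:read` permission):
#
#   python extract_warnings.py --workflow_run_id 12345 --output_dir ./warnings \
#       --token <GH_TOKEN> --targets DeprecationWarning,UserWarning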
from __future__ import annotations
from math import ceil, floor, sqrt
def UpperCamelCase_( lowerCamelCase_ = 200_0000 ) -> int:
_lowercase : list[int] = [0]
_lowercase : int
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
_lowercase : int = 0
# the area corresponding to the grid that gives the product closest to target
_lowercase : int = 0
# an estimate of b, using the quadratic formula
_lowercase : float
# the largest integer less than b_estimate
_lowercase : int
# the largest integer less than b_estimate
_lowercase : int
# the triangle number corresponding to b_floor
_lowercase : int
# the triangle number corresponding to b_ceil
_lowercase : int
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
_lowercase : Optional[int] = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
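        # The estimate solves b * (b + 1) / 2 = target / triangle_a for b via
        # the quadratic formula, hence (-1 + sqrt(1 + 8 * target / triangle_a)) / 2.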
_lowercase : List[str] = floor(lowerCamelCase_ )
_lowercase : Dict = ceil(lowerCamelCase_ )
_lowercase : List[str] = triangle_numbers[b_floor]
_lowercase : List[str] = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
_lowercase : Union[str, Any] = triangle_b_first_guess * triangle_a
_lowercase : Union[str, Any] = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
_lowercase : Any = triangle_b_second_guess * triangle_a
_lowercase : Optional[Any] = idx_a * b_ceil
return area
if __name__ == "__main__":
print(F"{solution() = }")
"""simple docstring"""
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3 ) -> qiskit.result.counts.Counts:
    '''simple docstring'''
    if isinstance(number_of_qubits , str ):
        raise TypeError("number of qubits must be a integer." )
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0." )
    if math.floor(number_of_qubits ) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer." )
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10)." )
    qr = QuantumRegister(number_of_qubits , "qr" )
    cr = ClassicalRegister(number_of_qubits , "cr" )
    quantum_circuit = QuantumCircuit(qr , cr )
    counter = number_of_qubits
    for i in range(number_of_qubits ):
        quantum_circuit.h(number_of_qubits - i - 1 )
        counter -= 1
        for j in range(counter ):
            quantum_circuit.cp(np.pi / 2 ** (counter - j) , j , counter )
    for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(k , number_of_qubits - k - 1 )
    # measure all the qubits
    quantum_circuit.measure(qr , cr )
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator" )
    job = execute(quantum_circuit , backend , shots=1_0000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(
f'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
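# Note: the circuit above is applied to the all-zero state, and the QFT of
# |0...0> is the uniform superposition, so the 10000 shots should land roughly
# evenly on all 2**n basis states (about 1250 counts per state for n = 3).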
"""simple docstring"""
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : List[Any] = logging.get_logger(__name__)
A : Tuple = {
"facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : str ="""data2vec-audio"""
def __init__( self , __a=32 , __a=7_68 , __a=12 , __a=12 , __a=30_72 , __a="gelu" , __a=0.1 , __a=0.1 , __a=0.1 , __a=0.0 , __a=0.1 , __a=0.1 , __a=0.0_2 , __a=1e-5 , __a="gelu" , __a=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , __a=(5, 2, 2, 2, 2, 2, 2) , __a=(10, 3, 3, 3, 3, 2, 2) , __a=False , __a=16 , __a=19 , __a=5 , __a=0.0_5 , __a=10 , __a=2 , __a=0.0 , __a=10 , __a=0 , __a="sum" , __a=False , __a=False , __a=2_56 , __a=(5_12, 5_12, 5_12, 5_12, 15_00) , __a=(5, 3, 3, 1, 1) , __a=(1, 2, 3, 1, 1) , __a=5_12 , __a=0 , __a=1 , __a=2 , __a=False , __a=3 , __a=2 , __a=3 , __a=None , **__a , ):
super().__init__(**__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a )
__lowerCAmelCase = hidden_size
__lowerCAmelCase = feat_extract_activation
__lowerCAmelCase = list(__a )
__lowerCAmelCase = list(__a )
__lowerCAmelCase = list(__a )
__lowerCAmelCase = conv_bias
__lowerCAmelCase = num_conv_pos_embeddings
__lowerCAmelCase = num_conv_pos_embedding_groups
__lowerCAmelCase = conv_pos_kernel_size
__lowerCAmelCase = len(self.conv_dim )
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = hidden_dropout
__lowerCAmelCase = attention_dropout
__lowerCAmelCase = activation_dropout
__lowerCAmelCase = feat_proj_dropout
__lowerCAmelCase = final_dropout
__lowerCAmelCase = layerdrop
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = initializer_range
__lowerCAmelCase = vocab_size
__lowerCAmelCase = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowerCAmelCase = mask_time_prob
__lowerCAmelCase = mask_time_length
__lowerCAmelCase = mask_time_min_masks
__lowerCAmelCase = mask_feature_prob
__lowerCAmelCase = mask_feature_length
__lowerCAmelCase = mask_feature_min_masks
# ctc loss
__lowerCAmelCase = ctc_loss_reduction
__lowerCAmelCase = ctc_zero_infinity
# adapter
__lowerCAmelCase = add_adapter
__lowerCAmelCase = adapter_kernel_size
__lowerCAmelCase = adapter_stride
__lowerCAmelCase = num_adapter_layers
__lowerCAmelCase = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__lowerCAmelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__lowerCAmelCase = list(__a )
__lowerCAmelCase = list(__a )
__lowerCAmelCase = list(__a )
__lowerCAmelCase = xvector_output_dim
@property
def snake_case ( self ):
return math.prod(self.conv_stride )
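# A minimal usage sketch, assuming the class above is transformers'
# Data2VecAudioConfig:
#
#   from transformers import Data2VecAudioConfig, Data2VecAudioModel
#   config = Data2VecAudioConfig()
#   model = Data2VecAudioModel(config)  # randomly initialised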
"""simple docstring"""
def solution(n: int = 1000 ) -> int:
    fa, fb = 1, 1
    index = 2
    while True:
        i = 0
        f = fa + fb
        fa, fb = fb, f
        index += 1
        for _ in str(f ):
            i += 1
        if i == n:
            break
    return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
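# Example: solution(3) == 12, since F(12) = 144 is the first Fibonacci number
# with three digits.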
"""simple docstring"""
from typing import Dict, Optional
import numpy as np
import datasets
UpperCAmelCase : Tuple = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
UpperCAmelCase : Optional[int] = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
_CITATION = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """Calculate intersection and union between a predicted and a ground-truth segmentation map."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
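# Worked micro-example (illustrative, not part of the original module): with
# pred_label = [0, 1], label = [0, 0] and num_labels = 2, the histograms give
# area_intersect = [1, 0], area_pred_label = [1, 1], area_label = [2, 0], hence
# area_union = [2, 1] and per-class IoU = [0.5, 0.0].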
def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """Accumulate intersection and union over a list of (prediction, ground truth) pairs."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels)
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
    """Compute mean IoU, mean accuracy and overall accuracy over all images."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels)
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
    return metrics
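# Usage sketch (illustrative): for a single prediction/ground-truth pair,
#   scores = mean_iou([np.array([[0, 1], [1, 1]])], [np.array([[0, 1], [0, 1]])],
#                     num_labels=2, ignore_index=255)
# returns a dict with "mean_iou", "mean_accuracy", "overall_accuracy" and the
# per-category arrays documented in _KWARGS_DESCRIPTION above.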
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(self, predictions, references, num_labels: int, ignore_index: int, nan_to_num: Optional[int] = None, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False, ):
        iou_result = mean_iou(
            results=predictions, gt_seg_maps=references, num_labels=num_labels, ignore_index=ignore_index, nan_to_num=nan_to_num, label_map=label_map, reduce_labels=reduce_labels, )
        return iou_result
| 136
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]])
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 369
|
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
__A , __A , __A =False, False, False
@dataclass
class Audio:
    """Audio feature: extracts audio data from a file or bytes into a dict with "path", "array" and "sampling_rate"."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)
def __call__( self):
return self.pa_type
    def encode_example(self, value):
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # to convert "PCM-byte" to "WAV-byte", you have to know the sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already have the PCM bytes, we don't have to read the file again (just use them!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.")
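    # Illustrative inputs accepted by encode_example (sketch, not original code):
    #   Audio().encode_example("song.wav")                                  -> {"bytes": None, "path": "song.wav"}
    #   Audio().encode_example({"array": samples, "sampling_rate": 16000})  -> in-memory wav bytes
    #   Audio().encode_example({"bytes": wav_bytes, "path": None})          -> stored as-is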
    def decode_example(self, value, token_per_repo_id=None):
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")
        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err
        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ')
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ')
        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None
            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)
        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate
        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self):
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }
    def cast_storage(self, storage):
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage):
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ], type=pa.binary(), )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()], type=pa.string(), )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
| 283
| 0
|
def combination_util(arr, n, r, index, data, i):
    """Print one combination at a time; recurses over include/exclude choices."""
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
    """Print all combinations of size r from arr[] of size n."""
    # A temporary array to store one combination at a time
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
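# Illustrative note: print_combination(arr, n, r) prints C(n, r) lines; for the
# driver below that is C(5, 3) = 10 combinations of [10, 20, 30, 40, 50].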
if __name__ == "__main__":
# Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
| 169
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 169
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
UpperCamelCase = None
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = '▁'
UpperCamelCase = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
UpperCamelCase = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
'tokenizer_file': {
'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
},
}
UpperCamelCase = {
'google/pegasus-xsum': 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    """Fast PEGASUS tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file=None, tokenizer_file=None, pad_token="<pad>", eos_token="</s>", unk_token="<unk>", mask_token="<mask_2>", mask_token_sent="<mask_1>", additional_special_tokens=None, offset=103, **kwargs, ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}")
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.")
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, pad_token=pad_token, eos_token=eos_token, unk_token=unk_token, mask_token=mask_token, mask_token_sent=mask_token_sent, offset=offset, additional_special_tokens=additional_special_tokens, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}")
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
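# Usage sketch (illustrative, assumes network access to the Hub):
#   tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
#   tok("Hello world").input_ids  # the sequence ends with tok.eos_token_id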
| 221
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    """Builds tiny DPR configs and inputs for the tests below."""

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, projection_dim=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = BertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]])  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ])
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
| 221
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : Optional[int] = logging.get_logger(__name__)
A : int = {
"xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
"xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
"xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
"xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
"xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
"xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
"xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
"xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
"xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
"xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}
class XLMConfig(PretrainedConfig):
    """Configuration class to store the configuration of an XLM model."""

    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }
    def __init__(self, vocab_size=30145, emb_dim=2048, n_layers=12, n_heads=16, dropout=0.1, attention_dropout=0.1, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=1, use_lang_emb=True, max_position_embeddings=512, embed_init_std=2048**-0.5, layer_norm_eps=1e-12, init_std=0.02, bos_index=0, eos_index=1, pad_index=2, unk_index=3, mask_index=5, is_encoder=True, summary_type="first", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, start_n_top=5, end_n_top=5, mask_token_id=0, lang_id=0, pad_token_id=2, bos_token_id=0, **kwargs, ):
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id
        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
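# Sketch (illustrative, not part of the original module): for the default task the
# dynamic axes resolve to
#   XLMOnnxConfig(XLMConfig()).inputs
#   -> OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#                   ("attention_mask", {0: "batch", 1: "sequence"}),
#                   ("token_type_ids", {0: "batch", 1: "sequence"})])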
| 57
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)
    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True, )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, torchscript=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, fp16=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], fp16=True, multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID], training=True, inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False, )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID], training=True, inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, multi_process=False, )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 11
| 0
|
"""simple docstring"""
import math
import random
def sigmoid_function(value, deriv=False):
    """Return the sigmoid of ``value``; with deriv=True, return the derivative
    given an already-activated value."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))
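# Quick check (illustrative): sigmoid_function(0.0) == 0.5; with deriv=True the
# argument must already be an activated value, so the call returns value * (1 - value).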
# Initial Value
INITIAL_VALUE = 0.02
def forward_propagation(expected, number_propagations):
    """Train a single weight via repeated forward passes and return the final
    output scaled back to the 0-100 range."""
    # Random starting weight
    weight = float(2 * (random.randint(1, 100)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
__A : Optional[Any] = int(input("Expected value: "))
__A : Optional[Any] = int(input("Number of propagations: "))
print(forward_propagation(expected, number_propagations))
| 358
|
"""simple docstring"""
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()
metric = load("accuracy")
def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)
class CustomCallback(TrainerCallback):
    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
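# Note (added for clarity): returning a copy of `control` after evaluating on the
# training set makes the Trainer log train-set metrics with a "train" prefix each
# epoch, alongside the regular validation metrics.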
def main():
    args = get_args()
    set_seed(args.seed)
    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        })
    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id
    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False
    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize, batched=True, remove_columns=train_test_validation["train"].column_names, )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
    training_args = TrainingArguments(
        output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy="epoch", save_strategy="epoch", logging_strategy="epoch", per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.01, metric_for_best_model="accuracy", run_name="complexity-java", report_to="wandb", )
    trainer = Trainer(
        model=model, args=training_args, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["valid"], tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, )
    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main()
| 326
| 0
|
'''simple docstring'''
import requests
_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def fetch_bbc_news(bbc_news_api_key):
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key='<Your BBC News API key goes here>')
| 297
|
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    """Builds tiny hybrid-DPT configs and inputs for the tests below."""

    def __init__(self, parent, batch_size=2, image_size=32, patch_size=16, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, backbone_out_indices=[0, 1, 2, 3], num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, backbone_featmap_shape=[1, 384, 24, 24], is_hybrid=True, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }
        return DPTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, backbone_out_indices=self.backbone_out_indices, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, is_hybrid=self.is_hybrid, backbone_config=backbone_config, backbone_featmap_shape=self.backbone_featmap_shape, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( lowercase, lowercase, unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Tuple =(DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
UpperCamelCase_ : Optional[Any] =(
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase_ : List[Any] =False
UpperCamelCase_ : Optional[int] =False
UpperCamelCase_ : Union[str, Any] =False
def UpperCAmelCase ( self ) -> int:
UpperCamelCase :Optional[Any] = DPTModelTester(self )
UpperCamelCase :List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCAmelCase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def UpperCAmelCase ( self ) -> int:
pass
def UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase , UpperCamelCase :int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase :Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase :Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) )
def UpperCAmelCase ( self ) -> int:
UpperCamelCase , UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase :Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase :Tuple = [*signature.parameters.keys()]
UpperCamelCase :Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Any:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCamelCase , UpperCamelCase :Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase :int = True
if model_class in get_values(SCREAMING_SNAKE_CASE_ ):
continue
UpperCamelCase :Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
UpperCamelCase :Union[str, Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Dict = model(**SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def UpperCAmelCase ( self ) -> Optional[int]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCamelCase , UpperCamelCase :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase :Union[str, Any] = False
UpperCamelCase :Dict = True
if model_class in get_values(SCREAMING_SNAKE_CASE_ ) or not model_class.supports_gradient_checkpointing:
continue
UpperCamelCase :Tuple = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.gradient_checkpointing_enable()
model.train()
UpperCamelCase :List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = model(**SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase , UpperCamelCase :int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase :Dict = _config_zero_init(SCREAMING_SNAKE_CASE_ )
for model_class in self.all_model_classes:
UpperCamelCase :Tuple = model_class(config=SCREAMING_SNAKE_CASE_ )
# Skip the check for the backbone
UpperCamelCase :List[str] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
UpperCamelCase :Tuple = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCAmelCase ( self ) -> Tuple:
pass
@slow
def UpperCAmelCase ( self ) -> Any:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
UpperCamelCase :int = DPTModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> List[Any]:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
UpperCamelCase , UpperCamelCase :int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase :Optional[Any] = '''add'''
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :int = DPTForDepthEstimation(SCREAMING_SNAKE_CASE_ )
def _A ( ):
UpperCamelCase :List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
@slow
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Any = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
UpperCamelCase :int = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = prepare_img()
UpperCamelCase :Union[str, Any] = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase :Union[str, Any] = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[int] = outputs.predicted_depth
# verify the predicted depth
UpperCamelCase :List[str] = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
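# A standalone sketch of the inference flow exercised by the slow test above. The
# checkpoint name, fixture path, and expected depth shape are all taken from the test
# itself; only the script framing outside the test harness is an assumption.
import torch
from PIL import Image
from transformers import DPTForDepthEstimation, DPTImageProcessor

processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    predicted_depth = model(**inputs).predicted_depth  # expected shape: (1, 384, 384)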
| 259
| 0
|
"""simple docstring"""
__UpperCamelCase : int = '''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 350
|
"""simple docstring"""
from __future__ import annotations
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : list[str] | None = None ):
lowerCAmelCase = word_bank or []
# create a table
lowerCAmelCase = len(_UpperCAmelCase ) + 1
lowerCAmelCase = []
for _ in range(_UpperCAmelCase ):
table.append([] )
# seed value
lowerCAmelCase = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(_UpperCAmelCase ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(_UpperCAmelCase )] == word:
lowerCAmelCase = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now, push that combination to table[i + len(word)]
table[i + len(_UpperCAmelCase )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(_UpperCAmelCase )]:
combination.reverse()
return table[len(_UpperCAmelCase )]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
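# Identifier obfuscation has severed several bindings in the listing above: the seed
# value was meant to populate table[0], the comprehension was meant to bind a
# new_combinations accumulator, and the function itself was meant to be named
# all_construct (which the __main__ demo still calls). A minimal runnable
# reconstruction of the same tabulation, assuming those were the original intents:
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Return every way to build `target` by concatenating words from `word_bank`."""
    word_bank = word_bank or []
    table: list[list[list[str]]] = [[] for _ in range(len(target) + 1)]
    table[0] = [[]]  # the empty prefix has exactly one construction: no words at all
    for i in range(len(target) + 1):
        if table[i]:
            for word in word_bank:
                if target[i : i + len(word)] == word:
                    new_combinations = [[word, *way] for way in table[i]]
                    table[i + len(word)] += new_combinations
    for combination in table[len(target)]:
        combination.reverse()  # combinations accumulate back to front
    return table[len(target)]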
| 309
| 0
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
torch.manual_seed(0 )
A = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=('DownBlock2D', 'AttnDownBlock2D') ,up_block_types=('AttnUpBlock2D', 'UpBlock2D') ,)
return model
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
A = self.dummy_uncond_unet
A = ScoreSdeVeScheduler()
A = ScoreSdeVePipeline(unet=__A ,scheduler=__A )
sde_ve.to(__A )
sde_ve.set_progress_bar_config(disable=__A )
A = torch.manual_seed(0 )
A = sde_ve(num_inference_steps=2 ,output_type='numpy' ,generator=__A ).images
A = torch.manual_seed(0 )
A = sde_ve(num_inference_steps=2 ,output_type='numpy' ,generator=__A ,return_dict=__A )[
0
]
A = image[0, -3:, -3:, -1]
A = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
A = "google/ncsnpp-church-256"
A = UNetaDModel.from_pretrained(__A )
A = ScoreSdeVeScheduler.from_pretrained(__A )
A = ScoreSdeVePipeline(unet=__A ,scheduler=__A )
sde_ve.to(__A )
sde_ve.set_progress_bar_config(disable=__A )
A = torch.manual_seed(0 )
A = sde_ve(num_inference_steps=10 ,output_type='numpy' ,generator=__A ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
A = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
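# Outside the test harness, the slow-path setup above reduces to a few lines. The
# checkpoint and step count come straight from the test; UNet2DModel is the real
# class name behind the digit-mangled `UNetaDModel` import, and the standalone
# framing is an assumption.
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel

model_id = "google/ncsnpp-church-256"
unet = UNet2DModel.from_pretrained(model_id)
scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)
sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
image = sde_ve(num_inference_steps=10, generator=torch.manual_seed(0), output_type="numpy").images[0]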
| 74
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : Dict = "openai/whisper-base"
__A : str = (
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
__A : Any = "transcriber"
__A : Any = WhisperProcessor
__A : int = WhisperForConditionalGeneration
__A : Any = ["audio"]
__A : List[str] = ["text"]
def _snake_case ( self , __A ):
"""simple docstring"""
return self.pre_processor(__A , return_tensors="pt" ).input_features
def _snake_case ( self , __A ):
"""simple docstring"""
return self.model.generate(inputs=__A )
def _snake_case ( self , __A ):
"""simple docstring"""
return self.pre_processor.batch_decode(__A , skip_special_tokens=__A )[0]
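# PipelineTool subclasses are invoked like plain callables: __call__ chains the three
# hooks defined above (pre-processing, generation, decoding). A hedged usage sketch,
# kept entirely as comments because the class name itself has been obfuscated away:
#
#   tool = TranscriberTool()            # hypothetical name for the class defined above
#   transcript = tool("sample.wav")     # runs pre_processor -> generate -> batch_decode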
| 283
| 0
|
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
_UpperCamelCase = pd.read_csv('''sample_data.csv''', header=None)
_UpperCamelCase = df.shape[:1][0]
# If you're using some other dataset, input the target column
_UpperCamelCase = df.iloc[:, 1:2]
_UpperCamelCase = actual_data.values.reshape(len_data, 1)
_UpperCamelCase = MinMaxScaler().fit_transform(actual_data)
_UpperCamelCase = 10
_UpperCamelCase = 5
_UpperCamelCase = 20
_UpperCamelCase = len_data - periods * look_back
_UpperCamelCase = actual_data[:division]
_UpperCamelCase = actual_data[division - look_back :]
_UpperCamelCase , _UpperCamelCase = [], []
_UpperCamelCase , _UpperCamelCase = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
_UpperCamelCase = np.array(train_x)
_UpperCamelCase = np.array(test_x)
_UpperCamelCase = np.array([list(i.ravel()) for i in train_y])
_UpperCamelCase = np.array([list(i.ravel()) for i in test_y])
_UpperCamelCase = Sequential()
model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(128, 1)))
model.add(Dense(forward_days))
model.compile(loss='''mean_squared_error''', optimizer='''adam''')
_UpperCamelCase = model.fit(
x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
)
_UpperCamelCase = model.predict(x_test)
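# The window construction above is easiest to verify on toy data: each sample is
# look_back consecutive points and its target is the next forward_days points. A
# self-contained sketch with hypothetical numbers (note also that Keras ignores
# input_shape on the second LSTM layer, so that argument above is redundant):
import numpy as np

series = np.arange(30, dtype=float).reshape(-1, 1)  # hypothetical univariate series
look_back, forward_days = 10, 5
windows_x, windows_y = [], []
for i in range(0, len(series) - forward_days - look_back + 1):
    windows_x.append(series[i : i + look_back])
    windows_y.append(series[i + look_back : i + look_back + forward_days])
x = np.array(windows_x)                       # shape (16, 10, 1): samples, timesteps, features
y = np.array([w.ravel() for w in windows_y])  # shape (16, 5): one row of future values per sample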
| 370
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_UpperCamelCase = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ['''OwlViTFeatureExtractor''']
_UpperCamelCase = ['''OwlViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 16
| 0
|
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
for param in module.parameters():
A__ = False
def UpperCAmelCase ( ):
"""simple docstring"""
A__ = 'cuda' if torch.cuda.is_available() else 'cpu'
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
A__ = 'mps'
if device == "mps":
print(
'WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'
' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'
' with generations.' )
return device
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
A__ = plt.imshow(UpperCamelCase__ )
fig.axes.get_xaxis().set_visible(UpperCamelCase__ )
fig.axes.get_yaxis().set_visible(UpperCamelCase__ )
plt.show()
def UpperCAmelCase ( ):
"""simple docstring"""
A__ = datetime.now()
A__ = current_time.strftime('%H:%M:%S' )
return timestamp
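# Obfuscation has reduced two of the helpers above to bare `A__ = ...` assignments,
# so their intent is easiest to show as a clean reconstruction. The names
# freeze_module and pick_device are placeholders; the originals are lost.
import torch

def freeze_module(module: torch.nn.Module) -> None:
    for param in module.parameters():
        param.requires_grad = False  # the garbled first helper above almost certainly did this

def pick_device() -> str:
    return "cuda" if torch.cuda.is_available() else "cpu"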
| 221
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class UpperCamelCase__:
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=13 ,__UpperCAmelCase=7 ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=99 ,__UpperCAmelCase=32 ,__UpperCAmelCase=2 ,__UpperCAmelCase=4 ,__UpperCAmelCase=37 ,__UpperCAmelCase="gelu" ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=5_12 ,__UpperCAmelCase=16 ,__UpperCAmelCase=2 ,__UpperCAmelCase=0.0_2 ,__UpperCAmelCase=3 ,__UpperCAmelCase=4 ,__UpperCAmelCase=None ,__UpperCAmelCase=0 ,) -> Dict:
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = type_sequence_label_size
A__ = initializer_range
A__ = num_labels
A__ = num_choices
A__ = scope
A__ = projection_dim
def snake_case__ ( self ) -> Optional[Any]:
A__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A__ = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
A__ = random_attention_mask([self.batch_size, self.seq_length] )
A__ = None
if self.use_token_type_ids:
A__ = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A__ = None
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A__ = ids_tensor([self.batch_size] ,self.num_choices )
A__ = BertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=__UpperCAmelCase ,initializer_range=self.initializer_range ,)
A__ = DPRConfig(projection_dim=self.projection_dim ,**config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Tuple:
A__ = TFDPRContextEncoder(config=__UpperCAmelCase )
A__ = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,token_type_ids=__UpperCAmelCase )
A__ = model(__UpperCAmelCase ,token_type_ids=__UpperCAmelCase )
A__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.projection_dim or self.hidden_size) )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Union[str, Any]:
A__ = TFDPRQuestionEncoder(config=__UpperCAmelCase )
A__ = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,token_type_ids=__UpperCAmelCase )
A__ = model(__UpperCAmelCase ,token_type_ids=__UpperCAmelCase )
A__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.projection_dim or self.hidden_size) )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[int]:
A__ = TFDPRReader(config=__UpperCAmelCase )
A__ = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape ,(self.batch_size,) )
def snake_case__ ( self ) -> int:
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ , A__ , A__ , A__ , A__ = config_and_inputs
A__ = {'input_ids': input_ids}
return config, inputs_dict
@require_tf
class UpperCamelCase__( __A , __A , unittest.TestCase ):
lowerCAmelCase__ : Optional[int] = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ : List[str] = {'feature-extraction': TFDPRQuestionEncoder} if is_tf_available() else {}
lowerCAmelCase__ : Tuple = False
lowerCAmelCase__ : Optional[int] = False
lowerCAmelCase__ : List[str] = False
lowerCAmelCase__ : int = False
lowerCAmelCase__ : str = False
def snake_case__ ( self ) -> str:
A__ = TFDPRModelTester(self )
A__ = ConfigTester(self ,config_class=__UpperCAmelCase ,hidden_size=37 )
def snake_case__ ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def snake_case__ ( self ) -> int:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*__UpperCAmelCase )
def snake_case__ ( self ) -> Optional[Any]:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*__UpperCAmelCase )
def snake_case__ ( self ) -> List[str]:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*__UpperCAmelCase )
@slow
def snake_case__ ( self ) -> int:
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = TFDPRContextEncoder.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = TFDPRContextEncoder.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = TFDPRQuestionEncoder.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = TFDPRReader.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@require_tf
class UpperCamelCase__( unittest.TestCase ):
@slow
def snake_case__ ( self ) -> Optional[Any]:
A__ = TFDPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base' )
A__ = tf.constant(
[[1_01, 75_92, 10_10, 20_03, 20_26, 38_99, 1_01_40, 10_29, 1_02]] ) # [CLS] hello, is my dog cute? [SEP]
A__ = model(__UpperCAmelCase )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
A__ = tf.constant(
[
[
0.0_3_2_3_6_2_5_3,
0.1_2_7_5_3_3_3_5,
0.1_6_8_1_8_5_0_9,
0.0_0_2_7_9_7_8_6,
0.3_8_9_6_9_3_3,
0.2_4_2_6_4_9_4_5,
0.2_1_7_8_9_7_1,
-0.0_2_3_3_5_2_2_7,
-0.0_8_4_8_1_9_5_9,
-0.1_4_3_2_4_1_1_7,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
| 221
| 1
|
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
UpperCAmelCase : Dict = True
except (ImportError, AttributeError):
UpperCAmelCase : Any = object
def __lowerCamelCase ( *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : str ):
'''simple docstring'''
pass
UpperCAmelCase : Optional[int] = False
UpperCAmelCase : int = logging.get_logger("transformers-cli/serving")
def __lowerCamelCase ( lowerCamelCase__ : Namespace ):
'''simple docstring'''
lowerCamelCase = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
return ServeCommand(lowerCamelCase__ , args.host , args.port , args.workers )
class __lowercase ( a_ ):
"""simple docstring"""
UpperCamelCase : dict
class __lowercase ( a_ ):
"""simple docstring"""
UpperCamelCase : List[str]
UpperCamelCase : Optional[List[int]]
class __lowercase ( a_ ):
"""simple docstring"""
UpperCamelCase : str
class __lowercase ( a_ ):
"""simple docstring"""
UpperCamelCase : Any
class __lowercase ( a_ ):
"""simple docstring"""
@staticmethod
def __A ( A ) -> List[str]:
'''simple docstring'''
lowerCamelCase = parser.add_parser(
"""serve""" , help="""CLI tool to run inference requests through REST and GraphQL endpoints.""" )
serve_parser.add_argument(
"""--task""" , type=A , choices=get_supported_tasks() , help="""The task to run the pipeline on""" , )
serve_parser.add_argument("""--host""" , type=A , default="""localhost""" , help="""Interface the server will listen on.""" )
serve_parser.add_argument("""--port""" , type=A , default=88_88 , help="""Port the serving will listen to.""" )
serve_parser.add_argument("""--workers""" , type=A , default=1 , help="""Number of http workers""" )
serve_parser.add_argument("""--model""" , type=A , help="""Model's name or path to stored model.""" )
serve_parser.add_argument("""--config""" , type=A , help="""Model's config name or path to stored model.""" )
serve_parser.add_argument("""--tokenizer""" , type=A , help="""Tokenizer name to use.""" )
serve_parser.add_argument(
"""--device""" , type=A , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , )
serve_parser.set_defaults(func=A )
def __init__( self , A , A , A , A ) -> Tuple:
'''simple docstring'''
lowerCamelCase = pipeline
lowerCamelCase = host
lowerCamelCase = port
lowerCamelCase = workers
if not _serve_dependencies_installed:
raise RuntimeError(
"""Using serve command requires FastAPI and uvicorn. """
"""Please install transformers with [serving]: pip install \"transformers[serving]\"."""
"""Or install FastAPI and uvicorn separately.""" )
else:
logger.info(F'Serving model over {host}:{port}' )
lowerCamelCase = FastAPI(
routes=[
APIRoute(
"""/""" , self.model_info , response_model=A , response_class=A , methods=["""GET"""] , ),
APIRoute(
"""/tokenize""" , self.tokenize , response_model=A , response_class=A , methods=["""POST"""] , ),
APIRoute(
"""/detokenize""" , self.detokenize , response_model=A , response_class=A , methods=["""POST"""] , ),
APIRoute(
"""/forward""" , self.forward , response_model=A , response_class=A , methods=["""POST"""] , ),
] , timeout=6_00 , )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
run(self._app , host=self.host , port=self.port , workers=self.workers )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def __A ( self , A = Body(A , embed=A ) , A = Body(A , embed=A ) ) -> Dict:
'''simple docstring'''
try:
lowerCamelCase = self._pipeline.tokenizer.tokenize(A )
if return_ids:
lowerCamelCase = self._pipeline.tokenizer.convert_tokens_to_ids(A )
return ServeTokenizeResult(tokens=A , tokens_ids=A )
else:
return ServeTokenizeResult(tokens=A )
except Exception as e:
raise HTTPException(status_code=5_00 , detail={"""model""": """""", """error""": str(A )} )
def __A ( self , A = Body(A , embed=A ) , A = Body(A , embed=A ) , A = Body(A , embed=A ) , ) -> Optional[int]:
'''simple docstring'''
try:
lowerCamelCase = self._pipeline.tokenizer.decode(A , A , A )
return ServeDeTokenizeResult(model="""""" , text=A )
except Exception as e:
raise HTTPException(status_code=5_00 , detail={"""model""": """""", """error""": str(A )} )
async def __A ( self , A=Body(A , embed=A ) ) -> str:
'''simple docstring'''
if len(A ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
lowerCamelCase = self._pipeline(A )
return ServeForwardResult(output=A )
except Exception as e:
raise HTTPException(5_00 , {"""error""": str(A )} )
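# All of the flag names below appear verbatim in register_subcommand above; the
# entry-point name `transformers-cli` is an assumption about how this command is
# wired up. Hypothetical invocation:
#
#   transformers-cli serve --task text-classification --host localhost --port 8888 --workers 1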
| 353
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase : int = {"vocab_file": "spiece.model"}
UpperCAmelCase : Optional[int] = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
class __lowercase ( a_ ):
"""simple docstring"""
def __init__( self , A , A=False , A=True , A=False , A="<s>" , A="</s>" , A="<unk>" , A="<sep>" , A="<pad>" , A="<cls>" , A="<mask>" , A=["<eop>", "<eod>"] , A = None , **A , ) -> None:
'''simple docstring'''
lowerCamelCase = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=A , remove_space=A , keep_accents=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , additional_special_tokens=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
lowerCamelCase = 3
lowerCamelCase = do_lower_case
lowerCamelCase = remove_space
lowerCamelCase = keep_accents
lowerCamelCase = vocab_file
lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"""You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
"""See https://pypi.org/project/jieba/ for installation.""" )
lowerCamelCase = jieba
lowerCamelCase = str.maketrans(""" \n""" , """\u2582\u2583""" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def __A ( self ) -> int:
'''simple docstring'''
return len(self.sp_model )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Dict:
'''simple docstring'''
lowerCamelCase = self.__dict__.copy()
lowerCamelCase = None
return state
def __setstate__( self , A ) -> int:
'''simple docstring'''
lowerCamelCase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCamelCase = {}
lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __A ( self , A ) -> Any:
'''simple docstring'''
if self.remove_space:
lowerCamelCase = """ """.join(inputs.strip().split() )
else:
lowerCamelCase = inputs
lowerCamelCase = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
lowerCamelCase = unicodedata.normalize("""NFKD""" , A )
lowerCamelCase = """""".join([c for c in outputs if not unicodedata.combining(A )] )
if self.do_lower_case:
lowerCamelCase = outputs.lower()
return outputs
def __A ( self , A ) -> List[str]:
'''simple docstring'''
lowerCamelCase = self.preprocess_text(A )
lowerCamelCase = self.sp_model.encode(A , out_type=A )
lowerCamelCase = []
for piece in pieces:
if len(A ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
lowerCamelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(A , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCamelCase = cur_pieces[1:]
else:
lowerCamelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(A )
else:
new_pieces.append(A )
return new_pieces
def __A ( self , A ) -> Union[str, Any]:
'''simple docstring'''
return self.sp_model.PieceToId(A )
def __A ( self , A ) -> int:
'''simple docstring'''
return self.sp_model.IdToPiece(A )
def __A ( self , A ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = """""".join(A ).replace(A , """ """ ).strip()
return out_string
def __A ( self , A , A = None ) -> List[int]:
'''simple docstring'''
lowerCamelCase = [self.sep_token_id]
lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __A ( self , A , A = None , A = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
if token_ids_a is not None:
return ([0] * len(A )) + [1] + ([0] * len(A )) + [1, 1]
return ([0] * len(A )) + [1, 1]
def __A ( self , A , A = None ) -> List[int]:
'''simple docstring'''
lowerCamelCase = [self.sep_token_id]
lowerCamelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __A ( self , A , A = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(A ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCamelCase = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , """wb""" ) as fi:
lowerCamelCase = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
def __A ( self , *A , **A ) -> int:
'''simple docstring'''
lowerCamelCase = super()._decode(*A , **A )
lowerCamelCase = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
return text
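# A hedged round trip with the tokenizer above, kept as comments: CpmTokenizer is the
# published class name this listing appears to correspond to, and the checkpoint
# comes from the pretrained vocab map at the top of the file.
#
#   tok = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
#   ids = tok.encode("你好,世界")   # jieba pre-segmentation, then SentencePiece
#   text = tok.decode(ids)          # _decode above restores spaces and newlines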
| 66
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Tuple = StableDiffusionPanoramaPipeline
_lowerCamelCase: int = TEXT_TO_IMAGE_PARAMS
_lowerCamelCase: List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
_lowerCamelCase: Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
_lowerCamelCase: List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
torch.manual_seed(0 )
A = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=1 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,)
A = DDIMScheduler()
torch.manual_seed(0 )
A = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,)
torch.manual_seed(0 )
A = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
A = CLIPTextModel(A_ )
A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
A = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[str, Any] ,A_ : List[Any]=0 ) -> List[str]:
A = torch.manual_seed(A_ )
A = {
'prompt': 'a photo of the dolomites',
'generator': generator,
# Setting height and width to None to prevent OOMs on CPU.
'height': None,
'width': None,
'num_inference_steps': 1,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = StableDiffusionPanoramaPipeline(**A_ )
A = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs(A_ )
A = sd_pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A = np.array([0.61_86, 0.53_74, 0.49_15, 0.41_35, 0.41_14, 0.45_63, 0.51_28, 0.49_77, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
super().test_inference_batch_single_identical(batch_size=2 ,expected_max_diff=3.25e-3 )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = StableDiffusionPanoramaPipeline(**A_ )
A = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs(A_ )
A = 'french fries'
A = sd_pipe(**A_ ,negative_prompt=A_ )
A = output.images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A = np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = StableDiffusionPanoramaPipeline(**A_ )
A = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs(A_ )
A = sd_pipe(**A_ ,view_batch_size=2 )
A = output.images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A = np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule='scaled_linear' )
A = StableDiffusionPanoramaPipeline(**A_ )
A = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs(A_ )
A = sd_pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A = np.array([0.40_24, 0.65_10, 0.49_01, 0.53_78, 0.58_13, 0.56_22, 0.47_95, 0.44_67, 0.49_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = PNDMScheduler(
beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule='scaled_linear' ,skip_prk_steps=A_ )
A = StableDiffusionPanoramaPipeline(**A_ )
A = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs(A_ )
A = sd_pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A = np.array([0.63_91, 0.62_91, 0.48_61, 0.51_34, 0.55_52, 0.45_78, 0.50_32, 0.50_23, 0.45_39] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[str, Any]=0 ) -> int:
A = torch.manual_seed(A_ )
A = {
'prompt': 'a photo of the dolomites',
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
A = 'stabilityai/stable-diffusion-2-base'
A = DDIMScheduler.from_pretrained(A_ ,subfolder='scheduler' )
A = StableDiffusionPanoramaPipeline.from_pretrained(A_ ,scheduler=A_ ,safety_checker=A_ )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
A = self.get_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
A = np.array(
[
0.36_96_83_92,
0.27_02_53_72,
0.32_44_67_66,
0.28_37_93_87,
0.36_36_32_74,
0.30_73_33_47,
0.27_10_00_27,
0.27_05_41_25,
0.25_53_60_96,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
A = StableDiffusionPanoramaPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-base' ,safety_checker=A_ )
A = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
A = self.get_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
A = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
A = 0
def callback_fn(A_ : int ,A_ : int ,A_ : torch.FloatTensor ) -> None:
A = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
A = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
A = latents[0, -3:, -3:, -1]
A = np.array(
[
0.18_68_18_69,
0.33_90_78_16,
0.5_36_12_76,
0.14_43_28_65,
-0.02_85_66_11,
-0.73_94_11_23,
0.23_39_79_87,
0.47_32_26_82,
-0.37_82_31_64,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
A = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
A = latents[0, -3:, -3:, -1]
A = np.array(
[
0.18_53_96_45,
0.33_98_72_48,
0.5_37_85_59,
0.14_43_71_42,
-0.02_45_52_61,
-0.7_33_83_17,
0.23_99_07_55,
0.47_35_62_72,
-0.3_78_65_05,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
A = False
A = 'stabilityai/stable-diffusion-2-base'
A = DDIMScheduler.from_pretrained(A_ ,subfolder='scheduler' )
A = StableDiffusionPanoramaPipeline.from_pretrained(A_ ,scheduler=A_ ,safety_checker=A_ )
A = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
A = self.get_inputs()
pipe(**A_ ,callback=A_ ,callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A = 'stabilityai/stable-diffusion-2-base'
A = DDIMScheduler.from_pretrained(A_ ,subfolder='scheduler' )
A = StableDiffusionPanoramaPipeline.from_pretrained(A_ ,scheduler=A_ ,safety_checker=A_ )
A = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
A = self.get_inputs()
A = pipe(**A_ )
A = torch.cuda.max_memory_allocated()
# make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
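# Stripped of assertions and memory accounting, the slow tests above reduce to a short
# script. The model id, scheduler subfolder, and prompt are all taken from the tests;
# running on CUDA is an assumption.
import torch
from diffusers import DDIMScheduler, StableDiffusionPanoramaPipeline

model_ckpt = "stabilityai/stable-diffusion-2-base"
scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
pipe = StableDiffusionPanoramaPipeline.from_pretrained(
    model_ckpt, scheduler=scheduler, safety_checker=None
).to("cuda")
image = pipe(
    prompt="a photo of the dolomites",
    generator=torch.manual_seed(0),
    num_inference_steps=3,
).images[0]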
| 74
|
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def lowerCAmelCase__( lowercase : Dict ) -> str: # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def lowerCAmelCase__( ) -> List[Any]:
with parallel_backend("spark" ):
assert ParallelBackendConfig.backend_name == "spark"
__snake_case : Any = [1, 2, 3]
with pytest.raises(lowercase ):
with parallel_backend("unsupported backend" ):
map_nested(lowercase , lowercase , num_proc=2 )
with pytest.raises(lowercase ):
with parallel_backend("unsupported backend" ):
map_nested(lowercase , lowercase , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" , [2, -1] )
def lowerCAmelCase__( lowercase : Dict ) -> Dict:
__snake_case : Any = [1, 2]
__snake_case : Dict = {"a": 1, "b": 2}
__snake_case : Optional[int] = {"a": [1, 2], "b": [3, 4]}
__snake_case : int = {"a": {"1": 1}, "b": 2}
__snake_case : str = {"a": 1, "b": 2, "c": 3, "d": 4}
__snake_case : Dict = [2, 3]
__snake_case : Tuple = {"a": 2, "b": 3}
__snake_case : int = {"a": [2, 3], "b": [4, 5]}
__snake_case : Dict = {"a": {"1": 2}, "b": 3}
__snake_case : str = {"a": 2, "b": 3, "c": 4, "d": 5}
with parallel_backend("spark" ):
assert map_nested(lowercase , lowercase , num_proc=lowercase ) == expected_map_nested_sa
assert map_nested(lowercase , lowercase , num_proc=lowercase ) == expected_map_nested_sa
assert map_nested(lowercase , lowercase , num_proc=lowercase ) == expected_map_nested_sa
assert map_nested(lowercase , lowercase , num_proc=lowercase ) == expected_map_nested_sa
assert map_nested(lowercase , lowercase , num_proc=lowercase ) == expected_map_nested_sa
| 326
| 0
|
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : List[str] , lowerCAmelCase__ : Optional[NestedDataStructureLike[PathLike]] = None , lowerCAmelCase__ : Optional[NamedSplit] = None , lowerCAmelCase__ : Optional[Features] = None , lowerCAmelCase__ : str = None , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : Optional[int] = None , **lowerCAmelCase__ : str , ):
SCREAMING_SNAKE_CASE_: Tuple = path_or_paths
SCREAMING_SNAKE_CASE_: List[str] = split if split or isinstance(lowerCAmelCase__ , lowerCAmelCase__) else "train"
SCREAMING_SNAKE_CASE_: Dict = features
SCREAMING_SNAKE_CASE_: str = cache_dir
SCREAMING_SNAKE_CASE_: Tuple = keep_in_memory
SCREAMING_SNAKE_CASE_: Optional[Any] = streaming
SCREAMING_SNAKE_CASE_: Optional[int] = num_proc
SCREAMING_SNAKE_CASE_: Tuple = kwargs
@abstractmethod
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
pass
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Dict , lowerCAmelCase__ : Optional[Features] = None , lowerCAmelCase__ : str = None , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : Optional[int] = None , **lowerCAmelCase__ : Optional[int] , ):
SCREAMING_SNAKE_CASE_: Optional[int] = features
SCREAMING_SNAKE_CASE_: Any = cache_dir
SCREAMING_SNAKE_CASE_: Optional[Any] = keep_in_memory
SCREAMING_SNAKE_CASE_: Union[str, Any] = streaming
SCREAMING_SNAKE_CASE_: Optional[Any] = num_proc
SCREAMING_SNAKE_CASE_: Optional[int] = kwargs
@abstractmethod
def _SCREAMING_SNAKE_CASE ( self : Tuple):
pass
| 367
|
def A_ ( _UpperCAmelCase , _UpperCAmelCase ):
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
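# The formula above is the Newton-Laplace speed of sound, c = sqrt(K / rho). The
# listing's parameters were renamed away from the `density` / `bulk_modulus` names
# its body still uses, so here is a runnable reconstruction with a worked example
# (approximate figures for water):
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5

print(f"{speed_of_sound_in_a_fluid(998, 2.15e9):.0f} m/s")  # ~1468 m/s; tabulated value for water is ~1480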
| 127
| 0
|
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
class UpperCAmelCase_ ( a , unittest.TestCase):
lowerCamelCase__ = BartphoTokenizer
lowerCamelCase__ = False
lowerCamelCase__ = True
def snake_case__ ( self):
'''simple docstring'''
super().setUp()
_lowerCAmelCase : str = ["▁This", "▁is", "▁a", "▁t", "est"]
_lowerCAmelCase : List[str] = dict(zip(__a, range(len(__a))))
_lowerCAmelCase : Optional[Any] = {"unk_token": "<unk>"}
_lowerCAmelCase : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
for token in vocab_tokens:
fp.write(f"{token} {vocab_tokens[token]}\n")
_lowerCAmelCase : Optional[Any] = BartphoTokenizer(__a, self.monolingual_vocab_file, **self.special_tokens_map)
tokenizer.save_pretrained(self.tmpdirname)
def snake_case__ ( self, **__a):
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return BartphoTokenizer.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = "This is a là test"
_lowerCAmelCase : Optional[int] = "This is a<unk><unk> test"
return input_text, output_text
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = BartphoTokenizer(__a, self.monolingual_vocab_file, **self.special_tokens_map)
_lowerCAmelCase : List[Any] = "This is a là test"
_lowerCAmelCase : str = "▁This ▁is ▁a ▁l à ▁t est".split()
_lowerCAmelCase : str = tokenizer.tokenize(__a)
self.assertListEqual(__a, __a)
_lowerCAmelCase : Tuple = tokens + [tokenizer.unk_token]
_lowerCAmelCase : List[str] = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a), __a)
| 36
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309
| 0
|
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> int:
"""simple docstring"""
while b:
UpperCamelCase_ , UpperCamelCase_ = b, a % b
return a
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> int:
"""simple docstring"""
return a if b == 0 else euclidean_gcd_recursive(a__ , a % b )
def lowerCAmelCase( )-> Optional[int]:
"""simple docstring"""
print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}" )
print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}" )
print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}" )
print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}" )
print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}" )
print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}" )
print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}" )
print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}" )
print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}" )
print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}" )
if __name__ == "__main__":
main()
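# The demo above calls euclidean_gcd and euclidean_gcd_recursive, but obfuscation has
# renamed both definitions to `lowerCAmelCase`, so nothing resolves. A runnable
# reconstruction under the names the call sites expect:
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a

def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)

assert euclidean_gcd(3, 6) == euclidean_gcd_recursive(3, 6) == 3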
| 360
|
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ )-> list:
"""simple docstring"""
if len(SCREAMING_SNAKE_CASE_ ) <= 1:
return lst
UpperCamelCase_ = 1
while i < len(SCREAMING_SNAKE_CASE_ ):
if lst[i - 1] <= lst[i]:
i += 1
else:
UpperCamelCase_ , UpperCamelCase_ = lst[i], lst[i - 1]
i -= 1
if i == 0:
UpperCamelCase_ = 1
return lst
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :Union[str, Any] = input("""Enter numbers separated by a comma:\n""").strip()
SCREAMING_SNAKE_CASE :Optional[int] = [int(item) for item in user_input.split(""",""")]
print(gnome_sort(unsorted))
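# Gnome sort walks a single index: advance while the pair behind it is ordered,
# otherwise swap and step back. The swap itself is one of the statements lost to
# obfuscation above, so here is a runnable reconstruction of the listing:
def gnome_sort(lst: list) -> list:
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]  # swap the out-of-order pair
            i -= 1
            if i == 0:
                i = 1
    return lst

assert gnome_sort([5, 3, 4, 1]) == [1, 3, 4, 5]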
| 60
| 0
|
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
SCREAMING_SNAKE_CASE : List[str] = logging.getLogger(__name__)
@dataclass
class _lowerCamelCase:
lowercase_ : Optional[int] = field(
default=1_28, metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
}, )
lowercase_ : bool = field(
default=_a, metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
lowercase_ : bool = field(
default=_a, metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
}, )
lowercase_ : Optional[int] = field(
default=_a, metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
}, )
lowercase_ : Optional[int] = field(
default=_a, metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
}, )
lowercase_ : Optional[int] = field(
default=_a, metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of prediction examples to this """
"""value if set."""
)
}, )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )
    do_lower_case: Optional[bool] = field(
        default=False, metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"}
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli",
                model_args.language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli",
                model_args.train_language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="validation",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="test",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(label_list)},
        label2id={label: i for i, label in enumerate(label_list)},
        finetuning_task="xnli",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"],
            examples["hypothesis"],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on prediction dataset",
            )

    # Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
if __name__ == "__main__":
main()
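
# A hedged smoke-test sketch: drive main() programmatically via sys.argv before calling it.
# All flag values below are illustrative placeholders (the flags themselves come from the
# dataclasses and TrainingArguments above), not recommendations:
#
#     import sys
#     sys.argv = [
#         "run_xnli.py",
#         "--model_name_or_path", "bert-base-multilingual-cased",
#         "--language", "de",
#         "--train_language", "en",
#         "--do_train", "--do_eval",
#         "--max_train_samples", "16", "--max_eval_samples", "16",
#         "--output_dir", "/tmp/debug_xnli",
#     ]
#     main()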
| 21
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
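
# The `_LazyModule` indirection above defers the heavy framework imports until an
# attribute is actually requested. A minimal, hypothetical sketch of the idea (not the
# transformers implementation) looks roughly like this:
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    """Imports a submodule only when one of its exported names is first accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Invert {submodule: [exported names]} into {exported name: submodule}.
        self._name_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f".{self._name_to_module[attr]}", self.__name__)
        return getattr(module, attr)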
| 16
| 0
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
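
# Illustrative usage sketch for the two classes above (sizes are arbitrary, chosen
# only to keep the example cheap; the past_key_values naming follows the
# OnnxConfigWithPast convention):
#
#     config = CodeGenConfig(n_layer=2, n_head=4, n_embd=256, n_positions=512)
#     onnx_config = CodeGenOnnxConfig(config, use_past=True)
#     list(onnx_config.inputs)
#     # -> ['input_ids', 'past_key_values.0.key', 'past_key_values.0.value',
#     #     'past_key_values.1.key', 'past_key_values.1.value', 'attention_mask']
#     onnx_config.num_layers, onnx_config.num_attention_heads
#     # -> (2, 4)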
| 211
|
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j
        for i in range(0, n):
            self.dp[i][i] = 0  # the distance from a node to itself is 0

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        # Relax every pair (i, j) through every intermediate node k.
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    print(graph.show_min(1, 4))  # 11 via 1 -> 3 -> 4
    print(graph.show_min(0, 3))  # 16 via 0 -> 2 -> 3
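
# A tiny hand-checked example makes the relaxation step easy to verify:
# the two-hop route 0 -> 1 -> 2 (cost 4 + 1 = 5) beats the direct edge (cost 10).
g = Graph(3)
g.add_edge(0, 1, 4)
g.add_edge(1, 2, 1)
g.add_edge(0, 2, 10)
g.floyd_warshall()
assert g.show_min(0, 2) == 5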
| 211
| 1
|
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
'''text_branch''': '''text_model''',
'''audio_branch''': '''audio_model.audio_encoder''',
'''attn''': '''attention.self''',
'''self.proj''': '''output.dense''',
'''attention.self_mask''': '''attn_mask''',
'''mlp.fc1''': '''intermediate.dense''',
'''mlp.fc2''': '''output.dense''',
'''norm1''': '''layernorm_before''',
'''norm2''': '''layernorm_after''',
'''bn0''': '''batch_norm''',
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    """Build the original CLAP model from a checkpoint."""
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg


def rename_state_dict(state_dict):
    """Map original CLAP parameter names onto the transformers naming scheme."""
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projection_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projection_layer == 0 else 2
            key = key.replace(f"_projection.{projection_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" in key and "qkv" in key:
            # split qkv into query, key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict


def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, _ = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()

    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
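
# Illustrative direct invocation (the checkpoint filename and output directory are
# placeholders; running this requires the CLAP dependency and a real checkpoint file):
#
#     convert_clap_checkpoint(
#         checkpoint_path="clap_htsat_tiny.pt",
#         pytorch_dump_folder_path="./clap-hf",
#         config_path=None,
#         enable_fusion=False,
#     )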
| 313
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66
| 0
|
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
    """Derive the Swin2SR model configuration from the checkpoint URL."""
    config = Swin2SRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""

    return config
def rename_key(name, config):
    """Rewrite one original parameter name into the transformers naming scheme."""
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name

    return name
def convert_state_dict(orig_state_dict, config):
    """Rename all keys and split fused qkv projections into query/key/value."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim

            if "weight" in key:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict
def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Convert an original Swin2SR checkpoint, verify a few output values, and save it."""
    config = get_config(checkpoint_url)
    model = Swin2SRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")

    # verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]]
        )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]]
        )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]]
        )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]]
        )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]]
        )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print("Looks ok!")

    url_to_name = {
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
            "swin2SR-classical-sr-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
            "swin2SR-classical-sr-x4-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
            "swin2SR-compressed-sr-x4-48"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
            "swin2SR-lightweight-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
            "swin2SR-realworld-sr-x4-64-bsrgan-psnr"
        ),
    }
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}")
        processor.push_to_hub(f"caidas/{model_name}")
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
lowerCAmelCase_ = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 358
|
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
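
# Hypothetical round trip (the checkpoint name is an assumption for illustration,
# and the stand-in image keeps the sketch self-contained):
#
#     from PIL import Image
#     processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#     image = Image.new("RGB", (224, 224))  # stand-in image
#     inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")
#     sorted(inputs.keys())
#     # -> ['attention_mask', 'input_ids', 'pixel_values',
#     #     'qformer_attention_mask', 'qformer_input_ids']
#     processor.save_pretrained("./instructblip-processor")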
| 279
| 0
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""salesforce/blip2-opt-2.7b""": """https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json""",
}
class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: Blip2VisionConfig,
        qformer_config: Blip2QFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
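
# Quick composition sketch: wire the three sub-configs into a Blip2Config.
# CONFIG_MAPPING["opt"] resolves to the default OPT text backbone used above.
#
#     vision_config = Blip2VisionConfig()
#     qformer_config = Blip2QFormerConfig()
#     text_config = CONFIG_MAPPING["opt"]()
#     config = Blip2Config.from_vision_qformer_text_configs(vision_config, qformer_config, text_config)
#     config.num_query_tokens                       # -> 32
#     config.qformer_config.encoder_hidden_size     # -> 1408, copied from the vision hidden size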
| 14
|
def kth_permutation(k, n):
    """Return the k-th (0-indexed) lexicographic permutation of range(n),
    decoded via the factorial number system."""
    # Factorials from 1! up to (n-1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation


if __name__ == "__main__":
    import doctest

    doctest.testmod()
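
# Two hand-checked calls; with n = 3 the valid range for k is 0..5.
assert kth_permutation(0, 3) == [0, 1, 2]  # k = 0 is the identity ordering
assert kth_permutation(5, 3) == [2, 1, 0]  # k = 5 is the last permutation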
| 127
| 0
|
import argparse
import collections
import os
import re

from transformers.utils import direct_transformers_import

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."


def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# Add here suffixes that are used to identify models, separated by |
ALLOWED_MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)


def camel_case_split(identifier):
    """Split a camel-cased name into its component words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def _center_text(text, width):
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent


def get_model_table_from_auto_modules():
    config_mapping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_mapping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_mapping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table


def check_model_table(overwrite=False):
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_table(args.fix_and_overwrite)
| 369
|
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(number: int) -> int:
    """Return the value of the Möbius function μ(number)."""
    factors = prime_factors(number)
    if is_square_free(factors):
        # Square-free: -1 for an odd number of prime factors, +1 for an even number.
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
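
# A few hand-checked values of μ, assuming prime_factors(1) returns an empty list
# (as in the TheAlgorithms helpers imported above):
assert mobius(1) == 1  # empty factorization counts as square-free with 0 factors
assert mobius(4) == 0  # 2**2 divides 4, so not square-free
assert mobius(6) == 1  # two distinct primes (2, 3)
assert mobius(30) == -1  # three distinct primes (2, 3, 5)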
| 21
| 0
|
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class A__ ( __snake_case ):
def __init__( self , A_ = 101 ):
'''simple docstring'''
UpperCamelCase : Dict = length
def __len__( self ):
'''simple docstring'''
return self.length
def __getitem__( self , A_ ):
'''simple docstring'''
return i
class A__ :
def __call__( self , A_ ):
'''simple docstring'''
return {"input_ids": torch.tensor(A_ ), "labels": torch.tensor(A_ )}
class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        # Echo the inputs back as "predictions" so the test can check sample order.
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
__lowerCamelCase : List[str] = HfArgumentParser((TrainingArguments,))
__lowerCamelCase : List[str] = parser.parse_args_into_dataclasses()[0]
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
f"""distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"""
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
__lowerCamelCase : str = DummyDataset(dataset_length)
def A_ ( _lowerCAmelCase ) -> Dict:
UpperCamelCase : Optional[Any] = list(range(len(_lowerCAmelCase ) ) )
UpperCamelCase : List[Any] = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
"Predictions and/or labels do not match expected results:\n - predictions: "
F"""{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}""" )
return {"success": success}
__lowerCamelCase : str = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
__lowerCamelCase : Any = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
__lowerCamelCase : List[Any] = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
__lowerCamelCase : Optional[int] = 2
__lowerCamelCase : Tuple = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
__lowerCamelCase : Tuple = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
__lowerCamelCase : Dict = None
| 52
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
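# For illustration, one pair produced above (layer 0, classification model):
#   ("blocks.0.attn.proj.weight", "vit.encoder.layer.0.attention.output.dense.weight")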
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    # We will verify our conversion on an image of two cats from the COCO validation set.
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
snake_case__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_patch16_224''',
type=str,
help='''Name of the ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
snake_case__ : int = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
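# Example invocation (script name and paths are illustrative):
#   python convert_vit_timm_to_pytorch.py --vit_name vit_base_patch16_224 --pytorch_dump_folder_path ./vit-base-patch16-224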
| 60
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__UpperCAmelCase = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    # Pairwise squared distances via the expansion ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2.
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d
def color_quantize(x, clusters):
    # Map each RGB pixel to the index of its nearest color cluster.
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
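# Toy example (values are illustrative, not the real ImageGPT clusters):
#   x = np.array([[[0, 0, 0], [255, 255, 255]]])       # one 1x2 RGB image
#   clusters = np.array([[0, 0, 0], [250, 250, 250]])  # two palette colors
#   color_quantize(x, clusters)  # -> array([0, 1]): nearest-cluster id per pixel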
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, clusters=None, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, do_normalize=True, do_color_quantize=True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs)

    def normalize(self, image, data_format=None) -> np.ndarray:
        # Rescale pixel values from [0, 255] to [-1, 1].
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image
    def preprocess(self, images, do_resize=None, size=None, resample=None, do_normalize=None, do_color_quantize=None, clusters=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_normalize:
            images = [self.normalize(image=image) for image in images]
        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
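# Note: unlike most image processors, the output carries color-cluster ids under "input_ids"
# rather than "pixel_values", since ImageGPT consumes images as sequences of palette tokens.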
| 371
|
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        # The hole must have the same parity as the outer square.
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)
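# Example: a 3x3 square with a 1x1 hole is a hollow square lamina built from
# t = 9 - 1 = 8 tiles, so the loop above increments count[8] once.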
if __name__ == "__main__":
print(F'{solution() = }')
| 145
| 0
|
'''simple docstring'''
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
def lowerCAmelCase (__A = "dhaka" , __A = 5):
"""simple docstring"""
_a = min(__A , 50) # Prevent abuse!
_a = {
'''q''': query,
'''tbm''': '''isch''',
'''hl''': '''en''',
'''ijn''': '''0''',
}
_a = requests.get('''https://www.google.com/search''' , params=__A , headers=__A)
_a = BeautifulSoup(html.text , '''html.parser''')
_a = ''''''.join(
re.findall(r'''AF_initDataCallback\(([^<]+)\);''' , str(soup.select('''script'''))))
_a = json.dumps(__A)
_a = json.loads(__A)
_a = re.findall(
r'''\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",''' , __A , )
if not matched_google_image_data:
return 0
_a = re.sub(
r'''\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]''' , '''''' , str(__A) , )
_a = re.findall(
r'''(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]''' , __A , )
for index, fixed_full_res_image in enumerate(__A):
if index >= max_images:
return index
_a = bytes(__A , '''ascii''').decode(
'''unicode-escape''')
_a = bytes(__A , '''ascii''').decode(
'''unicode-escape''')
_a = urllib.request.build_opener()
_a = [
(
'''User-Agent''',
'''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582''',
)
]
urllib.request.install_opener(__A)
_a = F'''query_{query.replace(' ' , '_')}'''
if not os.path.exists(__A):
os.makedirs(__A)
urllib.request.urlretrieve( # noqa: S310
__A , F'''{path_name}/original_size_img_{index}.jpg''')
return index
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(F"""{image_count} images were downloaded to disk.""")
except IndexError:
print("Please provide a search term.")
raise
| 211
|
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: list  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
| 211
| 1
|
def solution() -> int:
    """Count the Sundays that fell on the first of the month during the
    twentieth century (1 Jan 1901 to 31 Dec 2000)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6  # 6 Jan 1901 was the first Sunday of the century
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays
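# This counting problem is Project Euler problem 19; the expected result is 171.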
if __name__ == "__main__":
print(solution())
| 371
|
def odd_even_sort(input_list: list) -> list:
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list
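# Example: odd_even_sort([3, 1, 2]) -> [1, 2, 3]
# (even pass swaps 3 and 1, odd pass swaps 3 and 2, final pass confirms order).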
if __name__ == "__main__":
print('''Enter list to be sorted''')
lowerCamelCase : Any =[int(x) for x in input().split()]
# inputing elements of the list in one line
lowerCamelCase : Optional[int] =odd_even_sort(input_list)
print('''The sorted list is''')
print(sorted_list)
| 196
| 0
|
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials, direction, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.")

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"
class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default.")
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()))
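# Illustrative (hypothetical) lookup of a backend by name:
#   backend_cls = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend("optuna")]
#   backend_cls().ensure_available()  # raises with install instructions if optuna is missing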
| 330
|
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
| 279
| 0
|
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)
class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for the NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode):
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples
    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])
    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    def __init__(self):
        # in the CoNLL-2003 dataset, the chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode):
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples
    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f"{token['form']} ({token['upos']}|{s_p.pop(0)}) "
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 274
|
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """Holds the mean and standard deviation of the CLIP embedder used in stable unCLIP,
    so image embeddings can be normalized before noising and un-normalized afterwards."""

    @register_to_config
    def __init__(self, embedding_dim: int = 768,):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(self, torch_device: Optional[Union[str, torch.device]] = None, torch_dtype: Optional[torch.dtype] = None,):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
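# Minimal round-trip sketch (hypothetical usage):
#   normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
#   embeds = torch.randn(2, 768)
#   assert torch.allclose(normalizer.unscale(normalizer.scale(embeds)), embeds, atol=1e-5)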
| 274
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")
    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)
    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    return config, is_panoptic
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight') )
rename_keys.append(('backbone.0.body.bn1.weight', 'backbone.conv_encoder.model.embedder.embedder.normalization.weight') )
rename_keys.append(('backbone.0.body.bn1.bias', 'backbone.conv_encoder.model.embedder.embedder.normalization.bias') )
rename_keys.append(('backbone.0.body.bn1.running_mean', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_mean') )
rename_keys.append(('backbone.0.body.bn1.running_var', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_var') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
F'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
F'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
] )
return rename_keys
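# For illustration, one pair produced above (encoder layer 0):
#   ("transformer.encoder.layers.0.linear1.weight", "encoder.layers.0.fc1.weight")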
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    # We will verify our conversion on an image of two cats from the COCO validation set.
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    config, is_panoptic = get_detr_config(model_name)
    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)
    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
lowerCAmelCase__ :Dict = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''detr-resnet-50''',
type=str,
choices=['''detr-resnet-50''', '''detr-resnet-101'''],
help='''Name of the DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub or not.''')
lowerCAmelCase__ :List[str] = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
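# Example invocation (script name and paths are illustrative):
#   python convert_detr_to_pytorch.py --model_name detr-resnet-50 --pytorch_dump_folder_path ./detr-resnet-50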
| 329
|
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTVaModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, patch_size=2, num_channels=3, hidden_act="swish", conv_kernel_size=3, output_stride=32, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None, width_multiplier=0.25, ffn_dropout=0.0, attn_dropout=0.0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout_prob = ffn_dropout
        self.attn_dropout_prob = attn_dropout
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self):
        return MobileViTVaConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout_prob, attn_dropout=self.attn_dropout_prob, )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViTV2 does not use inputs_embeds')
    def test_inputs_embeds(self):
"""simple docstring"""
pass
@unittest.skip(reason='MobileViTV2 does not support input and output embeddings')
    def test_model_common_attributes(self):
"""simple docstring"""
pass
@unittest.skip(reason='MobileViTV2 does not output attentions')
    def test_attention_outputs(self):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.')
    def test_multi_gpu_data_parallel_forward(self):
"""simple docstring"""
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
pass
    def test_forward_signature(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_for_semantic_segmentation(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
"""simple docstring"""
return (
MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256')
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head(self):
        """simple docstring"""
        model = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256').to(
            torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_logits, atol=1e-4))
@slow
    def test_inference_semantic_segmentation(self):
        """simple docstring"""
        model = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ], device=torch_device)
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
@slow
    def test_post_processing_semantic_segmentation(self):
        """simple docstring"""
        model = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
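        # Note (an addition, not part of the original suite): post_process_semantic_segmentation
        # resizes the (1, 21, 32, 32) logits checked above; with target_sizes=[(h, w)] each
        # returned map has shape (h, w), and without target_sizes it stays at the native 32x32 grid.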
| 21
| 0
|
def check_bipartite_dfs(graph):
    """Two-color the graph with DFS; it is bipartite iff no edge joins same-colored vertices."""
    visited = [False] * len(graph)
    color = [-1] * len(graph)
    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
UpperCamelCase__ = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
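# A quick negative example (an addition): a triangle contains an odd cycle, so it is not bipartite.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # expected: False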
| 362
|
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
UpperCamelCase__ = logging.getLogger(__name__)
class NERTransformer(BaseTransformer):
    mode = "token-classification"
    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}")
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)
    def forward(self, **inputs):
        return self.model(**inputs)
    def training_step(self, batch, batch_num):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don"t use token_type_ids
        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}
    def prepare_data(self):
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples,
                    self.labels,
                    args.max_seq_length,
                    self.tokenizer,
                    cls_token_at_end=bool(self.config.model_type in ["xlnet"]),
                    cls_token=self.tokenizer.cls_token,
                    cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,
                    sep_token=self.tokenizer.sep_token,
                    sep_token_extra=False,
                    pad_on_left=bool(self.config.model_type in ["xlnet"]),
                    pad_token=self.tokenizer.pad_token_id,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode, batch_size, shuffle=False):
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids), batch_size=batch_size)
    def validation_step(self, batch, batch_nb):
        """Compute validation"""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don"t use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end(self, outputs):
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs):
        # when stable
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    def test_epoch_end(self, outputs):
        # updating to test_epoch_end instead of deprecated test_end
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        # Add NER specific options
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)")
        parser.add_argument(
            "--max_seq_length", default=128, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels", default="", type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus", default=0, type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
        return parser
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)
    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
| 87
| 0
|
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder(ModelMixin, ConfigMixin):
    """simple docstring"""
    @register_to_config
    def __init__(
        self, input_dims: int = 128, targets_length: int = 256, max_decoder_noise_time: float = 2000.0,
        d_model: int = 768, num_layers: int = 12, num_heads: int = 12, d_kv: int = 64, d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False), nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False), nn.SiLU(),
        )
        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)
        self.decoder_norm = TaLayerNorm(d_model)
        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)
    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)
    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)
        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device), (batch, seq_length))
        position_encodings = self.position_encoding(decoder_positions)
        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)
        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype)
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]
        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)
        for lyr in self.decoders:
            y = lyr(
                y, conditioning_emb=conditioning_emb, encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask)[0]
        y = self.decoder_norm(y)
        y = self.post_dropout(y)
        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    """simple docstring"""
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate))
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon))
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon))
    def forward(
        self, hidden_states, conditioning_emb=None, attention_mask=None, encoder_hidden_states=None,
        encoder_attention_mask=None, encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](
            hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask)
        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype)
            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask)
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)
        return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module):
    """simple docstring"""
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model)
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)
    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)
        # Self-attention block
        attention_output = self.attention(normed_hidden_states)
        hidden_states = hidden_states + self.dropout(attention_output)
        return hidden_states
class TaLayerCrossAttention(nn.Module):
    """simple docstring"""
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)
    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states, encoder_hidden_states=key_value_states, attention_mask=attention_mask.squeeze(1))
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class TaLayerFFCond(nn.Module):
    """simple docstring"""
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)
    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)
        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class TaDenseGatedActDense(nn.Module):
    """simple docstring"""
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()
    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class TaLayerNorm(nn.Module):
    """simple docstring"""
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps
    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    """simple docstring"""
    def forward(self, input: torch.Tensor):
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class TaFiLMLayer(nn.Module):
    """simple docstring"""
    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)
    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
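# Minimal sanity sketches (an addition; they rely only on the de-obfuscated classes above and
# on PyTorch's native tanh-approximate GELU, available since torch 1.12 -- an assumption).
if __name__ == "__main__":
    # NewGELUActivation should match torch's tanh-approximate GELU to float precision.
    _x = torch.linspace(-3.0, 3.0, steps=7)
    assert torch.allclose(NewGELUActivation()(_x), torch.nn.functional.gelu(_x, approximate="tanh"), atol=1e-6)
    # FiLM: scale/shift conditioning broadcast over the sequence axis, mirroring how
    # DecoderLayer feeds a (batch, 1, d_model * 4) conditioning embedding.
    _film = TaFiLMLayer(in_features=512, out_features=128)
    _h = torch.randn(2, 10, 128)
    _cond = torch.randn(2, 1, 512)
    assert _film(_h, _cond).shape == _h.shape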
| 106
|
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """simple docstring"""
    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}
    def __lt__(self, other):
        return self.key < other.key
    def __repr__(self):
        return self.id
    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)
    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight
def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)
def prim(graph: list, root: Vertex):
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a
def prim_heap(graph: list, root: Vertex):
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)
    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
def test_vector() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
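    # Hedged usage sketch (an addition; the helper names connect/prim/prim_heap follow the
    # de-obfuscated definitions above): a 4-vertex weighted cycle, where both Prim variants
    # should agree on the MST edges reported as 1-indexed (child, parent) pairs.
    g = [Vertex(n) for n in range(4)]
    connect(g, 1, 2, 1)
    connect(g, 2, 3, 2)
    connect(g, 3, 4, 1)
    connect(g, 1, 4, 4)
    assert prim(g, g[0]) == list(prim_heap(g, g[0])) == [(2, 1), (3, 2), (4, 3)]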
| 145
| 0
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
UpperCAmelCase_ = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    '''simple docstring'''
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True)
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True)
    special_keys = ['key_proj', 'value_proj', 'query_proj']
    mapping = {
'self_attn': 'ngram_self_attn',
'cross_attn': 'encoder_attn',
'cross_attn_layer_norm': 'encoder_attn_layer_norm',
'feed_forward_layer_norm': 'final_layer_norm',
'feed_forward': '',
'intermediate': 'fc1',
'output': 'fc2',
'key_proj': 'k_proj',
'query_proj': 'q_proj',
'value_proj': 'v_proj',
'word_embeddings': 'embed_tokens',
'embeddings_layer_norm': 'emb_layer_norm',
'relative_pos_embeddings': 'relative_linear',
'ngram_embeddings': 'ngram_input_embed',
'position_embeddings': 'embed_positions',
}
    for key in loading_info["missing_keys"]:
        attributes = key.split('.')
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
for attribute in attributes:
if attribute in mapping:
_snake_case = mapping[attribute]
if not hasattr(UpperCamelCase__ , UpperCamelCase__ ) and len(UpperCamelCase__ ) > 0:
_snake_case = attribute
elif hasattr(UpperCamelCase__ , UpperCamelCase__ ):
_snake_case = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
_snake_case = old_model.weight
logger.info(F'''{attribute} is initialized.''' )
_snake_case = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
_snake_case = old_model.bias
logger.info(F'''{attribute} is initialized''' )
_snake_case = True
break
elif attribute in special_keys and hasattr(UpperCamelCase__ , 'in_proj_weight' ):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                # these were bare comparisons (no-op expressions); make them real shape checks
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
_snake_case = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
_snake_case = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
_snake_case = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
_snake_case = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
_snake_case = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
_snake_case = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
_snake_case = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
_snake_case = nn.Parameter(old_model.embed_positions.weight[:512, :] )
_snake_case = True
break
if attribute.isdigit():
_snake_case = model[int(UpperCamelCase__ )]
_snake_case = old_model[int(UpperCamelCase__ )]
else:
_snake_case = getattr(UpperCamelCase__ , UpperCamelCase__ )
if old_attribute == "":
_snake_case = old_model
else:
if not hasattr(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError(F'''{old_model} does not have {old_attribute}''' )
_snake_case = getattr(UpperCamelCase__ , UpperCamelCase__ )
if not is_key_init:
raise ValueError(F'''{key} was not correctly initialized!''' )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase_ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
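# Toy illustration (an addition, not part of the conversion script) of the fused-projection
# slicing used above: a (3 * embed_dim, embed_dim) in_proj_weight splits into query, key and
# value blocks, in that order.
if __name__ == "__main__":
    import torch

    fused = torch.arange(12).reshape(6, 2)  # pretend embed_dim == 2
    embed_dim = fused.shape[0] // 3
    q, k, v = fused[:embed_dim, :], fused[embed_dim : 2 * embed_dim, :], fused[2 * embed_dim :, :]
    assert q.shape == k.shape == v.shape == (2, 2)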
| 295
|
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    '''simple docstring'''
    config = VideoMAEConfig()
    set_architecture_configs(model_name, config)
    if "finetuned" not in model_name:
        config.use_mean_pooling = False
    if "finetuned" in model_name:
        repo_id = 'huggingface/label-files'
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = 'kinetics400-id2label.json'
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = 'something-something-v2-id2label.json'
        else:
            raise ValueError('Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.')
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    return config
def set_architecture_configs(model_name, config):
    '''simple docstring'''
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2_048
    elif "huge" in model_name:
        config.hidden_size = 1_280
        config.intermediate_size = 5_120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2_560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    '''simple docstring'''
    if "encoder." in name:
        name = name.replace('encoder.', '')
    if "cls_token" in name:
        name = name.replace('cls_token', 'videomae.embeddings.cls_token')
    if "decoder_pos_embed" in name:
        name = name.replace('decoder_pos_embed', 'decoder.decoder_pos_embed')
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace('pos_embed', 'videomae.embeddings.position_embeddings')
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'videomae.embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'videomae.embeddings.norm')
    if "decoder.blocks" in name:
        name = name.replace('decoder.blocks', 'decoder.decoder_layers')
    if "blocks" in name:
        name = name.replace('blocks', 'videomae.encoder.layer')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name and "bias" not in name:
        name = name.replace('attn', 'attention.self')
    if "attn" in name:
        name = name.replace('attn', 'attention.attention')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "decoder_embed" in name:
        name = name.replace('decoder_embed', 'decoder.decoder_embed')
    if "decoder_norm" in name:
        name = name.replace('decoder_norm', 'decoder.decoder_norm')
    if "decoder_pred" in name:
        name = name.replace('decoder_pred', 'decoder.decoder_pred')
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace('norm.weight', 'videomae.layernorm.weight')
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace('norm.bias', 'videomae.layernorm.bias')
    if "head" in name and "decoder" not in name:
        name = name.replace('head', 'classifier')
    return name
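# Quick illustration (an addition) of the renaming rules above; safe to run standalone.
if __name__ == "__main__":
    assert (
        rename_key('blocks.0.attn.proj.weight')
        == 'videomae.encoder.layer.0.attention.output.dense.weight'
    )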
def convert_state_dict(orig_state_dict, config):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key.startswith('encoder.'):
            key = key.replace('encoder.', '')
        if "qkv" in key:
            key_split = key.split('.')
            if key.startswith('decoder.blocks'):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = 'decoder.decoder_layers.'
                if "weight" in key:
                    # fused qkv weight split into query/key/value; target keys follow the
                    # standard HF VideoMAE naming (an assumption, the originals were elided)
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.query.weight'] = val[:dim, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.key.weight'] = val[dim : dim * 2, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.value.weight'] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = 'videomae.encoder.layer.'
                if "weight" in key:
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.query.weight'] = val[:dim, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.key.weight'] = val[dim : dim * 2, :]
                    orig_state_dict[f'{prefix}{layer_num}.attention.attention.value.weight'] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def prepare_video():
    '''simple docstring'''
    file = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video', filename='eating_spaghetti.npy', repo_type='dataset')
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    '''simple docstring'''
    config = get_videomae_config(model_name)
    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)
    # download original checkpoint, hosted on Google Drive
    output = 'pytorch_model.bin'
    gdown.cached_download(checkpoint_url, output, quiet=True)  # quiet flag was elided; True assumed
    files = torch.load(output, map_location='cpu')
    if "model" in files:
        state_dict = files['model']
    else:
        state_dict = files['module']
    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
model.eval()
    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors='pt')
    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos', filename='bool_masked_pos.pt')
        inputs["bool_masked_pos"] = torch.load(local_path)
    outputs = model(**inputs)
    logits = outputs.logits
    model_names = [
'videomae-small-finetuned-kinetics',
'videomae-small-finetuned-ssv2',
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
'videomae-base-short',
'videomae-base-short-finetuned-kinetics',
'videomae-base',
'videomae-base-finetuned-kinetics',
'videomae-large',
'videomae-large-finetuned-kinetics',
'videomae-huge-finetuned-kinetics',
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
'videomae-base-short-ssv2',
'videomae-base-short-finetuned-ssv2',
'videomae-base-ssv2',
'videomae-base-finetuned-ssv2',
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1_408, 1_536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1_408, 1_536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1_408, 1_536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1_408, 1_536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1_408, 1_536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f'Model name not supported. Should be one of {model_names}')
    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print('Logits:', logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print('Logits ok!')
    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print('Loss ok!')
    if pytorch_dump_folder_path is not None:
        print(f'Saving model and image processor to {pytorch_dump_folder_path}')
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print('Pushing to the hub...')
        model.push_to_hub(model_name, organization='nielsr')
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
type=str,
help=(
"""URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
""" download link."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
UpperCAmelCase_ = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 295
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {'''openai-gpt''': '''https://huggingface.co/openai-gpt/resolve/main/config.json'''}
class OpenAIGPTConfig(PretrainedConfig):
    """simple docstring"""
    model_type = 'openai-gpt'
    attribute_map = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
    def __init__(
        self, vocab_size=4_0478, n_positions=512, n_embd=768, n_layer=12, n_head=12, afn="gelu",
        resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02,
        summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True,
        summary_first_dropout=0.1, **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
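# Minimal usage sketch (an addition): the defaults above mirror the published openai-gpt
# config, and attribute_map aliases the generic config names onto the GPT-specific ones.
if __name__ == "__main__":
    cfg = OpenAIGPTConfig()
    assert cfg.n_embd == 768 and cfg.n_head == 12
    assert cfg.hidden_size == cfg.n_embd  # resolved via attribute_map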
| 39
|
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def generate_n_pairs(
    context_len=32, max_steps=10, size_objective_set=1_00, min_len=10_26, trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl",
):
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=10_26, trim=trim)
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    # load pretrained model
    model = load_gpta('gpt2').to(device)
    print('computing perplexity on objective set')
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print('perplexity on objective set:', orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=1_28,
    eval_freq=1_00, igf_model_path="igf_model.pt",
):
    set_seed(42)
    # Load pre-trained model
    model = GPTaLMHeadModel.from_pretrained('gpt2')
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner, secondary_learner_train_data, max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size, eval_freq=1_00, igf_model_path=igf_model_path)
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune(
    model, train_dataset, test_dataset, context_len=32, max_steps=10_00, batch_size=16, threshold=1.0,
    recopy_model=recopy_gpta, secondary_learner=None, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt",
):
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)
    num_train_epochs = max_steps // (len(train_dataloader)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)
    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    observed_qs = []
    examples = 0
    contexts = []
    test_perps = []
    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print('Test perplexity, step', global_step, ':', real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True
            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0))[0].item()
                observed_qs.append(float(predicted_q))
                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False
            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1
            del outputs
            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)
                    print('Test perplexity, step', global_step, ':', real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break
    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    parser = argparse.ArgumentParser(description='Fine-tune a transformer model with IGF on a language modeling task')
# Required parameters
    parser.add_argument(
        '--data_dir', default=None, type=str, required=True,
        help='The input data dir. Should contain data files for WikiText.')
    parser.add_argument(
        '--model_name_or_path', default=None, type=str, required=True,
        help='Path to pretrained model or model identifier from huggingface.co/models')
    parser.add_argument(
        '--data_file', type=str, default=None, help=(
            'A jbl file containing tokenized data which can be split as objective dataset, '
            'train_dataset and test_dataset.'
        ),
    )
    parser.add_argument(
        '--igf_data_file', type=str, default=None,
        help='A jbl file containing the context and information gain pairs to train secondary learner.')
    parser.add_argument(
        '--output_dir', default=None, type=str, required=True,
        help='The output directory where the final fine-tuned model is stored.')
    parser.add_argument(
        '--tokenizer_name', default=None, type=str,
        help='Pretrained tokenizer name or path if not the same as model_name')
    parser.add_argument('--seed', type=int, default=None, help='A seed for reproducible training.')
    parser.add_argument(
        '--context_len', default=32, type=int, help=(
            'The maximum total input sequence length after tokenization. Sequences longer '
            'than this will be truncated, sequences shorter will be padded.'
        ),
    )
    parser.add_argument(
        '--size_objective_set', default=1_00, type=int,
        help='number of articles that are long enough to be used as our objective set')
    parser.add_argument(
        '--eval_freq', default=1_00, type=int, help='secondary model evaluation is triggered at eval_freq')
    parser.add_argument('--max_steps', default=10_00, type=int, help='To calculate training epochs')
    parser.add_argument(
        '--secondary_learner_batch_size', default=1_28, type=int,
        help='batch size of training data for secondary learner')
    parser.add_argument(
        '--batch_size', default=16, type=int, help='batch size of training data of language model(gpt2) ')
    parser.add_argument(
        '--eval_interval', default=10, type=int, help=(
            'decay the selectivity of our secondary learner filter from'
            '1 standard deviation above average to 1 below average after 10 batches'
        ),
    )
    parser.add_argument(
        '--number', default=1_00, type=int,
        help='The number of examples split to be used as objective_set/test_data')
    parser.add_argument(
        '--min_len', default=10_26, type=int,
        help='The minimum length of the article to be used as objective set')
    parser.add_argument(
        '--secondary_learner_max_epochs', default=15, type=int, help='number of epochs to train secondary learner')
    parser.add_argument('--trim', default=True, type=bool, help='truncate the example if it exceeds context length')
    parser.add_argument(
        '--threshold', default=1.0, type=float, help=(
            'The threshold value used by secondary learner to filter the train_data and allow only'
            ' informative data as input to the model'
        ),
    )
    parser.add_argument('--finetuned_model_name', default='gpt2_finetuned.pt', type=str, help='finetuned_model_name')
    parser.add_argument(
        '--recopy_model', default=recopy_gpta, type=str,
        help='Reset the model to the original pretrained GPT-2 weights after each iteration')
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=1_00 , min_len=10_26 , trim=snake_case , data_file='data/tokenized_stories_train_wikitext103.jbl' , igf_data_file='igf_context_pairs.jbl' , )
# Load train data for secondary learner
lowercase__: Tuple = joblib.load('data/IGF_values.jbl' )
# Train secondary learner
lowercase__: List[str] = training_secondary_learner(
snake_case , secondary_learner_max_epochs=15 , secondary_learner_batch_size=1_28 , eval_freq=1_00 , igf_model_path='igf_model.pt' , )
# load pretrained gpt2 model
lowercase__: Dict = GPTaLMHeadModel.from_pretrained('gpt2' )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
lowercase__ , lowercase__: Tuple = generate_datasets(
context_len=32 , file='data/tokenized_stories_train_wikitext103.jbl' , number=1_00 , min_len=10_26 , trim=snake_case )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
snake_case , snake_case , snake_case , context_len=32 , max_steps=10_00 , batch_size=16 , threshold=1.0 , recopy_model=snake_case , secondary_learner=snake_case , eval_interval=10 , finetuned_model_name='gpt2_finetuned.pt' , )
if __name__ == "__main__":
main()
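# Hypothetical invocation (script name and paths are placeholders, not part of
# the original example):
#   python run_clm_igf.py --data_dir data/ --model_name_or_path gpt2 --output_dir igf_out/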
| 196
| 0
|
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
| 336
|
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged))[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
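    # The two composition comments above have no accompanying code; below is a
    # minimal sketch, assuming skfuzzy's relation helpers (relation_min,
    # maxmin_composition, maxprod_composition) are available in this version of
    # the library. Compositions operate on fuzzy relations (2-D matrices), not
    # on the 1-D membership vectors directly.
    R1 = fuzz.relation_min(young, middle_aged)
    R2 = fuzz.relation_min(middle_aged, young)
    maxmin_comp = fuzz.maxmin_composition(R1, R2)
    maxprod_comp = fuzz.maxprod_composition(R1, R2)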
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("""Young""")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("""Middle aged""")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("""union""")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("""intersection""")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("""complement_a""")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("""difference a/b""")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("""alg_sum""")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("""alg_product""")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("""bdd_sum""")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("""bdd_difference""")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 336
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
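# Minimal usage sketch (illustrative, not part of the original module):
# config = XLMRobertaConfig()
# onnx_config = XLMRobertaOnnxConfig(config, task="default")
# print(onnx_config.inputs)  # OrderedDict mapping input names to their dynamic axes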
| 274
|
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)"""
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
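# Shape sketch (illustrative note, not from the original file): for a dataset of
# n stories, pre_process_datasets yields, per dataset, tensors of shapes
#   input_ids    -> (n, 2, input_len)   two candidate endings per story
#   mc_token_ids -> (n, 2)              position of the [clf] token per ending
#   lm_labels    -> (n, 2, input_len)   LM targets (-100 marks ignored positions)
#   mc_labels    -> (n,)                index of the correct ending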
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("""Waiting for debugger attach""" )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))
if not args.do_train and not args.do_eval:
raise ValueError("""At least one of `do_train` or `do_eval` must be True.""" )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
A__ = ["""_start_""", """_delimiter_""", """_classify_"""]
A__ = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(__a )
A__ = tokenizer.convert_tokens_to_ids(__a )
A__ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(__a ) )
model.to(__a )
# Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object"""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]
logger.info("""Encoding dataset...""" )
A__ = load_rocstories_dataset(args.train_dataset )
A__ = load_rocstories_dataset(args.eval_dataset )
A__ = (train_dataset, eval_dataset)
A__ = tokenize_and_encode(__a )
# Compute the max input length for the Transformer
A__ = model.config.n_positions // 2 - 2
A__ = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
A__ = min(__a , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
A__ = pre_process_datasets(__a , __a , __a , *__a )
A__ , A__ = tensor_datasets[0], tensor_datasets[1]
A__ = TensorDataset(*__a )
A__ = RandomSampler(__a )
A__ = DataLoader(__a , sampler=__a , batch_size=args.train_batch_size )
A__ = TensorDataset(*__a )
A__ = SequentialSampler(__a )
A__ = DataLoader(__a , sampler=__a , batch_size=args.eval_batch_size )
# Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
| 274
| 1
|
import math
def is_prime(number: int) -> bool:
    """Determine whether a given number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the nth prime number."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[len(primes) - 1]
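# Quick sanity checks (illustrative): the first prime is 2 and the sixth is 13.
# assert solution(1) == 2
# assert solution(6) == 13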
if __name__ == "__main__":
print(f"""{solution() = }""")
| 357
|
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
# See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig(PretrainedConfig):
    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, num_labels=3, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(num_labels=num_labels, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs)

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
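# Usage sketch (illustrative, not part of the original module): generate dummy
# export inputs for a seq2seq task; AutoTokenizer is assumed importable from
# transformers.
# cfg = BartConfig()
# onnx_cfg = BartOnnxConfig(cfg, task="seq2seq-lm")
# tok = AutoTokenizer.from_pretrained("facebook/bart-base")
# dummy = onnx_cfg.generate_dummy_inputs(tok, framework=TensorType.PYTORCH)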
| 282
| 0
|
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m", "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c", "--caption", type=str, default="robotic cat with wings", help="Text used to generate images.",
    )
    parser.add_argument(
        "-n", "--images_num", type=int, default=4, help="How much images to generate.",
    )
    parser.add_argument(
        "-s", "--seed", type=int, default=42, help="Seed for random process.",
    )
    parser.add_argument(
        "-ci", "--cuda_id", type=int, default=0, help="cuda_id.",
    )
    args = parser.parse_args()
    return args


def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(pipeline, prompt="robotic cat with wings", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, generator=generator, num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images


args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")

pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)

if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)

grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
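# Hypothetical invocation (script and model names are placeholders, not from the
# original example):
#   python generate_images.py -m ./quantized-sd-model -c "robotic cat with wings" -n 4 -s 42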
| 10
|
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")


class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Prints all the elements in the store."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
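    # Trace of the asserted state (added for illustration): refers A, 2, 3 fill
    # the cache; re-referring 'A' moves it to the front; 4 fills the last slot;
    # 5 evicts the least recently used key (2), leaving [5, 4, 'A', 3].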
| 87
| 0
|
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Separate the rows of source data into per-column lists."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)

        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)

        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
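# Worked example (illustrative, not from the original file):
# procentual_proximity([[1, 10], [2, 20], [3, 15]], [0, 1])
# -> [[1, 10, 1.0], [2, 20, 1.5], [3, 15, 0.5]]
# (weight 0 rewards small values in a column, weight 1 rewards large ones)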
| 102
|
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
UpperCamelCase__ = """\
"""
UpperCamelCase__ = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
UpperCamelCase__ = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ), reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size=16, add_start_token=True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts, add_special_tokens=False, padding=True, truncation=True, max_length=max_tokenized_len, return_tensors="pt", return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 102
| 1
|
from random import randint, random
def construct_highway(number_of_cells: int, frequency: int, initial_speed: int, random_frequency: bool = False, random_speed: bool = False, max_speed: int = 5) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells

    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway[0])

    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells

        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)

    return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
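    # Example run (illustrative): a 100-cell loop with a car every 5 cells at
    # initial speed 2, simulated for 20 steps with a 10% braking probability.
    # highway = construct_highway(100, 5, 2)
    # simulate(highway, 20, 0.1, 5)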
| 295
|
def split(string: str, separator: str = " ") -> list:
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words
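# Illustrative behaviour (note: unlike str.split, a trailing separator does not
# produce a trailing empty string):
# split("apple#banana#cherry", "#") -> ['apple', 'banana', 'cherry']
# split("Hello there") -> ['Hello', 'there']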
if __name__ == "__main__":
from doctest import testmod
testmod()
| 295
| 1
|
'''simple docstring'''
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )

    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image

    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output


if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
| 365
|
'''simple docstring'''
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int):
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F'Total count for various states are: {single_qubit_measure(1, 1)}')
| 214
| 0
|
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
| 336
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', '''beit.embeddings.cls_token'''),
(f'''{prefix}patch_embed.proj.weight''', '''beit.embeddings.patch_embeddings.projection.weight'''),
(f'''{prefix}patch_embed.proj.bias''', '''beit.embeddings.patch_embeddings.projection.bias'''),
(f'''{prefix}pos_embed''', '''beit.embeddings.position_embeddings'''),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('''mask_token''', '''beit.embeddings.mask_token'''),
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''),
('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add image processor", use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add model", use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
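# Hypothetical invocation (script name is a placeholder; the checkpoint URL
# defaults to the dit-base value above):
#   python convert_dit_to_pytorch.py --pytorch_dump_folder_path ./dit-base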
| 336
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
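# Hedged usage sketch of the two helpers above. The concrete ids shown assume the
# standard camembert-base vocabulary (<s>=5, </s>=6) -- that mapping is an assumption
# of this example, not something fixed by the class itself:
#
#   tok = CamembertTokenizerFast.from_pretrained("camembert-base")
#   tok.build_inputs_with_special_tokens([10, 11])        # -> [5, 10, 11, 6]
#   tok.build_inputs_with_special_tokens([10, 11], [12])  # -> [5, 10, 11, 6, 6, 12, 6]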
| 153
|
"""simple docstring"""
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key; it usually looks like
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, just skip these entries
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_TEXT_ENCODER + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_UNET + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
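# Minimal sketch of the merge rule applied above, W <- W + alpha * (up @ down), on a toy
# linear layer. All shapes and values below are invented for illustration (rank-4 LoRA
# on an 8x8 projection); the function is defined but never called by this script:
def _merge_rule_demo():
    layer = torch.nn.Linear(8, 8, bias=False)  # stands in for a UNet/text-encoder projection
    weight_up = torch.randn(8, 4)              # lora_up:   (out_features, rank)
    weight_down = torch.randn(4, 8)            # lora_down: (rank, in_features)
    alpha = 0.75
    with torch.no_grad():
        layer.weight += alpha * torch.mm(weight_up, weight_down)  # same update as curr_layer.weight.data += ...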
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_path''', default=None, type=str, required=True, help='''Path to the base model in diffusers format.'''
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--lora_prefix_unet''', default='''lora_unet''', type=str, help='''The prefix of UNet weight in safetensors'''
)
parser.add_argument(
'''--lora_prefix_text_encoder''',
default='''lora_te''',
type=str,
help='''The prefix of text encoder weight in safetensors''',
)
parser.add_argument('''--alpha''', default=0.75, type=float, help='''The merging ratio in W = W0 + alpha * deltaW''')
parser.add_argument(
'''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.'''
)
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 153
| 1
|
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 88
|
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
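# Why Horner's rule: it rewrites c0 + c1*x + ... + cn*x^n as (...((cn*x + c(n-1))*x + ...)*x + c0,
# so an n-degree polynomial needs only n multiplications instead of the naive repeated powering.
# Worked check for the tuple above:
#   5.0*10^2 + 9.3*10^3 + 7.0*10^4 = 500 + 9300 + 70000 = 79800.0
# Both print statements therefore emit 79800.0.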
| 282
| 0
|
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """simple docstring"""

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    """simple docstring"""

    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    """simple docstring"""

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self) -> int:
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
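# Hedged usage sketch (paths and model name below are placeholders, not from this file):
#
#   from transformers import AutoTokenizer
#   args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")  # expects SQuAD .json files
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   train_dataset = SquadDataset(args, tokenizer, mode=Split.train)
#   batch = train_dataset[0]  # dict with input_ids / attention_mask / start & end positions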
| 350
|
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]
    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
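# Tiny sanity check of the lookup helpers on a hand-built flattened param dict.
# Shapes and values are made up for illustration; real T5X checkpoints store one
# entry per kernel under exactly these slash-separated keys:
def _lookup_demo():
    import numpy as np

    params = {
        f"encoder/layers_0/attention/{n}/kernel": np.zeros((4, 4)) for n in ("key", "out", "query", "value")
    }
    params["encoder/layers_0/pre_attention_layer_norm/scale"] = np.ones(4)
    k, o, q, v = t5x_attention_lookup(params, 0, "encoder", "attention")
    scale = t5x_layer_norm_lookup(params, 0, "encoder", "pre_attention_layer_norm")
    assert k.shape == (4, 4) and scale.shape == (4,)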
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    """Converts the parameters from a T5X-Flax checkpoint to PyTorch naming."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Replaces the params in the model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)

    # Load weights from the T5X checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
| 282
| 0
|
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
EQUATORIAL_RADIUS = 6_378_137
def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """simple docstring"""
    # Equation parameters
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
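# Hedged usage sketch (coordinates illustrative; San Francisco to New York City).
# Lambert's formula corrects the spherical haversine estimate for the Earth's
# ellipsoidal flattening, so the two results should agree to within a fraction
# of a percent. The function below is defined but not run automatically:
def _distance_demo():
    san_francisco = (37.774856, -122.424227)
    new_york = (40.713019, -74.012647)
    d = lamberts_ellipsoidal_distance(*san_francisco, *new_york)
    print(f"Ellipsoidal distance: {d / 1000:.1f} km")  # roughly 4.1e3 km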
if __name__ == "__main__":
import doctest
doctest.testmod()
| 102
|
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 102
| 1
|
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input_dataset():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
| 358
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)


def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    # Deal with dynamic shape in TensorFlow cleanly: return static dims where
    # known, dynamic scalar tensors otherwise.
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    # The tiny shift is numerically a no-op but avoids NaN outputs from
    # tf.nn.softmax under XLA when an entire row is masked.
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)


def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs


def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF

    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)


def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )


def save_attributes_to_hdf5_group(group, name, data):
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data


def expand_1d(data):
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
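# Quick illustration of the two shape helpers above (values invented for the example;
# defined as a function so nothing runs on import). In eager mode shapes are concrete,
# so shape_list returns plain ints here; under tf.function unknown dims come back as
# scalar tensors instead:
def _tf_utils_demo():
    x = tf.zeros((2, 3, 4))
    print(shape_list(x))                         # [2, 3, 4]
    print(shape_list(flatten(x, start_dim=1)))   # [2, 12], mirroring torch.flatten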
| 56
| 0
|
import sys
def matrix_chain_order(array):
    '''simple docstring'''
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    '''simple docstring'''
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    '''simple docstring'''
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
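# Expected output for the dimension array above (the classic CLRS example):
#   No. of Operation required: 15125
#   ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) )
# The DP recurrence fills matrix[a][b] with the cheapest split point c, so the
# parenthesization is recovered from sol[a][b] by the recursive printer.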
| 129
|
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Testing
if __name__ == "__main__":

    def test1() -> None:
        '''simple docstring'''
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        '''simple docstring'''
        import doctest

        doctest.testmod()

    test1()
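# The update implemented in sherman_morrison() is the standard rank-1 inverse update:
#     (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
# where `self` plays the role of A^(-1). With ainv = I as in test1(), the call
# computes (I + u v^T)^(-1) directly, avoiding a full O(n^3) re-inversion.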
| 214
| 0
|
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEquals(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 362
|
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
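# Expected output for the two tuples above: merging simply re-sorts the combined
# elements, so duplicates are kept and the result stays ordered:
#   -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10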
| 7
| 0
|
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_CITATION = r'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 153
|
"""simple docstring"""
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_DESCRIPTION = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_KWARGS_DESCRIPTION = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
| 153
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    '''simple docstring'''
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    '''simple docstring'''
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class _A ( _a ):
"""simple docstring"""
UpperCAmelCase : Tuple = ["""pixel_values"""]
def __init__( self : List[Any] , __UpperCAmelCase : Optional[Union[List[List[int]], np.ndarray]] = None , __UpperCAmelCase : bool = True , __UpperCAmelCase : Dict[str, int] = None , __UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , __UpperCAmelCase : bool = True , __UpperCAmelCase : bool = True , **__UpperCAmelCase : Optional[Any] , ):
super().__init__(**__UpperCAmelCase)
a : Dict = size if size is not None else {"height": 256, "width": 256}
a : Tuple = get_size_dict(__UpperCAmelCase)
a : Dict = np.array(__UpperCAmelCase) if clusters is not None else None
a : Optional[Any] = do_resize
a : Dict = size
a : Any = resample
a : int = do_normalize
a : Tuple = do_color_quantize
def __snake_case ( self : Tuple , __UpperCAmelCase : np.ndarray , __UpperCAmelCase : Dict[str, int] , __UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , __UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCAmelCase : int , ):
a : List[Any] = get_size_dict(__UpperCAmelCase)
if "height" not in size or "width" not in size:
raise ValueError(f'''Size dictionary must contain both height and width keys. Got {size.keys()}''')
return resize(
__UpperCAmelCase , size=(size["height"], size["width"]) , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase)
def __snake_case ( self : Any , __UpperCAmelCase : np.ndarray , __UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , ):
a : str = rescale(image=__UpperCAmelCase , scale=1 / 127.5 , data_format=__UpperCAmelCase)
a : Optional[Any] = image - 1
return image
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None,
                   resample: PILImageResampling = None, do_normalize: bool = None,
                   do_color_quantize: Optional[bool] = None,
                   clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
                   return_tensors: Optional[Union[str, TensorType]] = None,
                   data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
                   **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters) if clusters is not None else None
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_normalize:
            images = [self.normalize(image=image) for image in images]
        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
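# A minimal sketch (not the library helper itself) of the nearest-cluster color
# quantization used above, assuming `clusters` is an (n_clusters, 3) RGB palette:
import numpy as np

def color_quantize_sketch(pixel_values: np.ndarray, clusters: np.ndarray) -> np.ndarray:
    # (batch, height, width, 3) -> (num_pixels, 3)
    flat = pixel_values.reshape(-1, 3)
    # squared Euclidean distance from every pixel to every palette entry
    distances = ((flat[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
    # index of the nearest palette entry for each pixel
    return np.argmin(distances, axis=1)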
| 350
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096""": """https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"""
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(self, attention_window: Union[List[int], int] = 512, sep_token_id: int = 2, pad_token_id: int = 1,
                 bos_token_id: int = 0, eos_token_id: int = 2, vocab_size: int = 30522, hidden_size: int = 768,
                 num_hidden_layers: int = 12, num_attention_heads: int = 12, intermediate_size: int = 3072,
                 hidden_act: str = "gelu", hidden_dropout_prob: float = 0.1, attention_probs_dropout_prob: float = 0.1,
                 max_position_embeddings: int = 512, type_vocab_size: int = 2, initializer_range: float = 0.02,
                 layer_norm_eps: float = 1e-12, onnx_export: bool = False, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(self, preprocessor: "PreTrainedTokenizerBase", batch_size: int = -1,
                              seq_length: int = -1, is_pair: bool = False,
                              framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1
        return inputs
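# Quick usage sketch of the two classes above:
config = LongformerConfig(attention_window=256)
onnx_config = LongformerOnnxConfig(config, task="default")
print(list(onnx_config.inputs))        # ['input_ids', 'attention_mask', 'global_attention_mask']
print(onnx_config.default_onnx_opset)  # >= 14, needed for the tril operator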
| 226
| 0
|
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )
        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)
        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
        times["filter"] = filter(dataset)
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times["map fast-tokenizer batched numpy"] = map(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
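# `get_duration` and `generate_example_dataset` come from the local `utils`
# module of the benchmarks folder, which is not shown here. A plausible
# minimal sketch of `get_duration` (an assumption, not the actual utils code):
import functools
import time

def get_duration_sketch(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start  # the benchmark stores this duration
    return wrapper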
| 37
|
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path
    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}
    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")
    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )
    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")
    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
    parser.add_argument(
        '''--target_model_path''',
        default=None,
        type=str,
        required=False,
        help='''Folder in which to save the pruned model (defaults to bertarized_<model_name> next to the source model)''',
    )
    args = parser.parse_args()
main(args)
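# The binarizers above come from the `emmental` research package, which is not
# shown here. A rough sketch of what top-k binarization does (an assumption
# about its behaviour, not the actual implementation):
import torch

def topk_mask(scores: torch.Tensor, keep_ratio: float) -> torch.Tensor:
    # keep the fraction `keep_ratio` of weights with the highest importance scores
    k = max(1, int(keep_ratio * scores.numel()))
    threshold = scores.flatten().kthvalue(scores.numel() - k + 1).values
    return (scores >= threshold).to(scores.dtype)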
| 282
| 0
|
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin):
    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]

    def __call__(self, images=None, input_points=None, input_labels=None, input_boxes=None,
                 return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        encoding_image_processor = self.image_processor(
            images,
            return_tensors=return_tensors,
            **kwargs,
        )
        # pop arguments that are not used in the forward pass but are needed here
        original_sizes = encoding_image_processor["original_sizes"]
        if hasattr(original_sizes, "numpy"):  # checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()
        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
        )
        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor,
            original_sizes,
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            return_tensors=return_tensors,
        )
        return encoding_image_processor
    def _normalize_and_convert(self, encoding_image_processor, original_sizes, input_points=None, input_labels=None,
                               input_boxes=None, return_tensors="pt"):
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels)
            input_points = np.array(input_points)
        if input_labels is not None:
            input_labels = np.array(input_labels)
        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)
        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})
        return encoding_image_processor
    def _pad_points_and_labels(self, input_points, input_labels):
        expected_nb_points = max([point.shape[0] for point in input_points])
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0
                )
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels

    def _normalize_coordinates(self, target_size: int, coords: np.ndarray, original_size, is_bounding_box=False):
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)
        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2)
        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)
        if is_bounding_box:
            coords = coords.reshape(-1, 4)
        return coords

    def _check_and_preprocess_points(self, input_points=None, input_labels=None, input_boxes=None):
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()
            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None
        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()
            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list of integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None
        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()
            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None
        return input_points, input_labels, input_boxes

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))

    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
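# Usage sketch for the processor above ("facebook/sam-vit-base" is the public
# SAM checkpoint id; the image is a dummy array):
from PIL import Image

processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
# one click prompt at pixel (x=320, y=240); it is rescaled to the longest_edge size
inputs = processor(image, input_points=[[[320, 240]]], return_tensors="pt")
print(inputs["input_points"].shape)  # torch.Size([1, 1, 1, 2])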
| 350
|
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(donor_conc: float, acceptor_conc: float, intrinsic_conc: float) -> float:
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive' )
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive' )
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
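    # Worked example with assumed typical silicon values at 300 K
    # (N_D = N_A = 1e17, n_i = 1e10, all in cm^-3):
    # V_bi = (kT/q) * ln(N_D * N_A / n_i^2) = 0.02585 * ln(1e14) ≈ 0.833 V
    print(builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1e10))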
| 119
| 0
|
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors
        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
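# Usage sketch via the pipeline factory (the model id is illustrative); the
# output is a nested list of floats shaped [batch, seq_len, hidden_size]:
from transformers import pipeline

extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
features = extractor("Transformers is great!")
print(len(features[0]), len(features[0][0]))  # seq_len, hidden_size (768 for this model)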
| 328
|
'''simple docstring'''
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$"
    )
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
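    # A few spot checks derived from the pattern itself (optional 0/94/+94/0094
    # prefix, then 7X with X in {0,1,2,4,5,6,7,8}, optional separator, 7 digits):
    assert is_sri_lankan_phone_number("+94773283048")
    assert is_sri_lankan_phone_number("0718382399")
    assert not is_sri_lankan_phone_number("0notaphone")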
| 56
| 0
|
"""simple docstring"""
speed_chart = {
'''km/h''': 1.0,
'''m/s''': 3.6,
'''mph''': 1.609344,
'''knot''': 1.852,
}
speed_chart_inverse = {
'''km/h''': 1.0,
'''m/s''': 0.277777778,
'''mph''': 0.621371192,
'''knot''': 0.539956803,
}
def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
if __name__ == "__main__":
import doctest
doctest.testmod()
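    # Worked example: 100 mph -> km/h is 100 * 1.609344 (mph to km/h) * 1.0:
    print(convert_speed(100, "mph", "km/h"))  # 160.934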
| 351
|
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]",
                 eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]",
                 mask_token="[MASK]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token,
            eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token,
            mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs

    def _tokenize(self, text):
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
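# Quick usage sketch (requires the sentencepiece model; "albert-base-v2" is the
# public checkpoint id, so the exact tokens depend on that vocabulary):
tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
print(tokenizer.tokenize("This is a test"))  # e.g. ['▁this', '▁is', '▁a', '▁test']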
| 188
| 0
|
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
A : Optional[Any] = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
A : Optional[int] = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
A : Any = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... 
[\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    def _info(self):
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
"https://github.com/jhclark/tercom",
] , )
    def _compute(self, predictions, references, normalized=False, ignore_punct=False,
                 support_zh_ja_chars=False, case_sensitive=False):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized, no_punct=ignore_punct, asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 57
|
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
A__ = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 2_1_9_7_0, 1_3, 5, 6_0_9_2, 1_6_7, 2_8, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 7_0_2_8, 1_2_0_5_1, 1_8, 1_7, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 3_5_1_5, 1_8_6_8_4, 8, 4_4_6_1, 6, 1_9_2_7, 2_9_7, 8, 1_2_0_6_0, 2_6_0_7, 1_8, 1_3, 5, 4_4_6_1, 1_5, 1_0_5_3_8, 3_8, 8, 1_3_5, 1_5, 8_2_2, 5_8, 1_5, 9_9_3, 1_0_3_6_3, 1_5, 1_4_6_0, 8_0_0_5, 4_4_6_1, 1_5, 9_9_3, 2_5_5, 2_3_2_8, 9, 9, 9, 6, 2_6, 1_1_1_2, 8_1_6, 3_2_6_0, 1_3, 5, 1_0_3, 2_3_7_7, 6, 1_7, 1_1_1_2, 8_1_6, 2_7_8_2, 1_3, 5, 1_0_3, 1_0_6_4_1, 6, 2_9, 8_4, 2_5_1_2, 2_4_3_0, 7_8_2, 1_8_6_8_4, 2_7_6_1, 1_9, 8_0_8, 2_4_3_0, 2_5_5_6, 1_7, 8_5_5, 1_4_8_0, 9_4_7_7, 4_0_9_1, 1_2_8, 1_1_7_1_2, 1_5, 7_1_0_3, 2_1_5_3, 6_7_3, 1_7, 2_4_8_8_3, 9_9_9_0, 9, 3], [2, 1_1_5_0_2, 2_5, 1_0_0_6, 2_0, 7_8_2, 8, 1_1_8_0_9, 8_5_5, 1_7_3_2, 1_9_3_9_3, 1_8_6_6_7, 3_7, 3_6_7, 2_1_0_1_8, 6_9, 1_8_5_4, 3_4, 1_1_8_6_0, 1_9_1_2_4, 2_7, 1_5_6, 2_2_5, 1_7, 1_9_3, 4_1_4_1, 1_9, 6_5, 9_1_2_4, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 1_4, 2_2_3_1, 8_8_6, 2_3_8_5, 1_7_6_5_9, 8_4, 1_4, 1_6_7_9_2, 1_9_5_2, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=A__, model_name="albert-base-v2", revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e"
        )
| 7
| 0
|
"""simple docstring"""
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    def put(self, value):
        """Called by `.generate()` to push new tokens."""
        raise NotImplementedError()

    def end(self):
        """Called by `.generate()` to signal the end of generation."""
        raise NotImplementedError()
class TextStreamer(BaseStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs
        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True
    def put(self, value):
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]
        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return
        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)
        self.on_finalized_text(printable_text)

    def end(self):
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""
        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text, stream_end=False):
        print(text, flush=True, end="" if not stream_end else None)
    def _is_chinese_char(self, cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4e00 and cp <= 0X9fff)
or (cp >= 0X3400 and cp <= 0X4dbf) #
or (cp >= 0X2_0000 and cp <= 0X2_a6df) #
or (cp >= 0X2_a700 and cp <= 0X2_b73f) #
or (cp >= 0X2_b740 and cp <= 0X2_b81f) #
or (cp >= 0X2_b820 and cp <= 0X2_ceaf) #
or (cp >= 0Xf900 and cp <= 0Xfaff)
or (cp >= 0X2_f800 and cp <= 0X2_fa1f) #
): #
return True
return False
class TextIteratorStreamer(TextStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: float = None, **decode_kwargs):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text, stream_end=False):
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
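# Usage sketch of the iterator streamer above (the standard pattern from the
# transformers docs; "gpt2" is just an illustrative checkpoint):
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
streamer = TextIteratorStreamer(tok)
thread = Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=20))
thread.start()
for new_text in streamer:  # yields decoded text chunks as they are generated
    print(new_text, end="")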
| 358
|
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device=None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)
    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std
        # compute
        dt = -1.0 / len(self.timesteps)
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise
        return x, x_mean
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
| 168
| 0
|
"""simple docstring"""
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def _A ( lowercase , lowercase , lowercase = "f32le" , ):
"""simple docstring"""
a =f'''{sampling_rate}'''
a ='''1'''
if format_for_conversion == "s16le":
a =2
elif format_for_conversion == "f32le":
a =4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
a =platform.system()
if system == "Linux":
a ='''alsa'''
a ='''default'''
elif system == "Darwin":
a ='''avfoundation'''
a =''':0'''
elif system == "Windows":
a ='''dshow'''
a ='''default'''
a =[
'''ffmpeg''',
'''-f''',
format_,
'''-i''',
input_,
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-fflags''',
'''nobuffer''',
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
a =int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
a =_ffmpeg_stream(_UpperCAmelCase , _UpperCAmelCase )
for item in iterator:
yield item
def ffmpeg_microphone_live(sampling_rate: int, chunk_length_s: float, stream_chunk_s: Optional[int] = None,
                           stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
                           format_for_conversion: str = "f32le"):
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
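# Usage sketch for ffmpeg_read: decode any file ffmpeg understands into a
# mono float32 waveform ("sample.mp3" is a hypothetical local file):
with open("sample.mp3", "rb") as f:
    audio = ffmpeg_read(f.read(), sampling_rate=16000)
print(audio.dtype, audio.shape)  # float32, (num_samples,)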
| 81
|
import qiskit
def half_adder(bita: int, bitb: int):
    backend = qiskit.Aer.get_backend("aer_simulator")
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bita == 1:
        qc_ha.x(0)
    if bitb == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, backend, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
if __name__ == "__main__":
    counts = half_adder(1, 1)
print(f'''Half Adder Output Qubit Counts: {counts}''')
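    # Quick check of the full truth table (sum bit = XOR, carry bit = AND);
    # each call returns a counts histogram such as {'10': 1000}:
    for a_bit in (0, 1):
        for b_bit in (0, 1):
            print(a_bit, b_bit, half_adder(a_bit, b_bit))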
| 226
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/nllb-large-en-ro""": 1024,
"""facebook/nllb-200-distilled-600M""": 1024,
}
# fmt: off
_lowerCamelCase : Optional[int] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class NllbTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" NLLB tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting.
        - In legacy mode: no prefix and suffix=[eos, src_lang_code].
        - In default mode: prefix=[src_lang_code], suffix=[eos].
        """
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def set_tgt_lang_special_tokens(self, lang) -> None:
        """Reset the special tokens to the target lang setting.
        - In legacy mode: no prefix and suffix=[eos, tgt_lang_code].
        - In default mode: prefix=[tgt_lang_code], suffix=[eos].
        """
        self.cur_lang_code = self.convert_tokens_to_ids(lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
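# --- Illustrative sketch (not part of the original module) ---
# Assuming the module is imported through transformers (the relative imports
# above prevent running this file directly), a translation round-trip with the
# public NLLB checkpoint would look like this; it needs network access.
if __name__ == "__main__":
    tok = NllbTokenizerFast.from_pretrained(
        "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
    )
    enc = tok("Hello world", return_tensors="pt")
    # In default (non-legacy) mode the sequence is [src_lang_code] + tokens + [eos].
    print(tok.convert_ids_to_tokens(enc["input_ids"][0].tolist()))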
| 353
|
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    """
    Configuration class to store the configuration of a UMT5 model.
    """

    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers
class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
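# --- Illustrative sketch (not part of the original module) ---
# Assuming the module is importable as part of transformers, this shows the
# T5-style read-only aliases exposed by the config; values here are arbitrary.
if __name__ == "__main__":
    config = UMT5Config(d_model=256, num_layers=4, num_heads=4)
    # hidden_size / num_hidden_layers / num_attention_heads map onto
    # d_model / num_layers / num_heads respectively.
    print(config.hidden_size, config.num_hidden_layers, config.num_attention_heads)  # -> 256 4 4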
| 337
| 0
|
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
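# --- Illustrative usage (not part of the original script) ---
# Assuming a data_dir laid out as {train,val,test}.{source,target}, a run might
# look like this (the script name and paths below are placeholders):
#
#   python pack_dataset.py --tok_name facebook/bart-large-cnn \
#       --max_seq_len 256 --data_dir ./cnn_dm --save_path ./cnn_dm_packed
#
# Only the `train` split is packed; `val` and `test` are copied unchanged.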
| 300
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2
        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # schedulers without sigmas are not supported here
                continue
            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-2
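# --- Illustrative sketch (not part of the original tests) ---
# Minimal end-to-end use of the latent upscaler, assuming a CUDA device and
# the two public checkpoints exercised above; prompt and settings are arbitrary.
if __name__ == "__main__":
    base = StableDiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
    ).to("cuda")
    upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
        "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
    ).to("cuda")
    latents = base("a watercolor lighthouse", output_type="latent").images
    upscaled = upscaler(
        prompt="a watercolor lighthouse", image=latents, num_inference_steps=20, guidance_scale=0
    ).images[0]
    upscaled.save("lighthouse_1024.png")  # default output type is a PIL image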
| 119
| 0
|
"""simple docstring"""
def longest_common_substring(text1: str, text2: str) -> str:
    """
    Finds the longest common substring of text1 and text2 via dynamic programming.
    """
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")

    text1_length = len(text1)
    text2_length = len(text2)

    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
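    # --- Illustrative check (not part of the original) ---
    # "abcdef" vs "xabded" share "ab" (and "de") of length 2; the scan finds
    # "ab" first, so that is what is returned.
    assert longest_common_substring("abcdef", "xabded") == "ab"
    print(longest_common_substring("abcdef", "xabded"))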
| 355
|
"""simple docstring"""
def solution(length: int = 50) -> int:
    """
    Counts the ways a row of `length` units can be filled with unit squares
    and tiles that are two, three, or four units long.
    """
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]
if __name__ == "__main__":
print(f"""{solution() = }""")
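    # --- Illustrative cross-check (not part of the original) ---
    # A tiny recursive counter over the same tile set (unit squares plus tiles
    # of length 2-4) to sanity-check the table-based solution for small rows.
    def count_naive(remaining: int) -> int:
        if remaining == 0:
            return 1
        return sum(count_naive(remaining - tile) for tile in (1, 2, 3, 4) if tile <= remaining)

    for n in range(8):
        assert solution(n) == count_naive(n), n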
| 69
| 0
|
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotSmallModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotSmallModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot_small-90M")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
| 71
|
from __future__ import annotations
def shear_stress(
    stress: float,
    tangential_force: float,
    area: float,
) -> tuple[str, float]:
    """
    Given any two of shear stress, tangential force and area (pass the unknown
    quantity as 0), computes the third from stress = tangential_force / area.
    """
    if (stress, tangential_force, area).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif stress < 0:
        raise ValueError("Stress cannot be negative")
    elif tangential_force < 0:
        raise ValueError("Tangential Force cannot be negative")
    elif area < 0:
        raise ValueError("Area cannot be negative")
    elif stress == 0:
        return (
            "stress",
            tangential_force / area,
        )
    elif tangential_force == 0:
        return (
            "tangential_force",
            stress * area,
        )
    else:
        return (
            "area",
            tangential_force / stress,
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
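    # --- Illustrative usage (not part of the original) ---
    # Pass the unknown quantity as 0; e.g. a 25 N tangential force over a
    # 5 m^2 area gives a shear stress of 5 Pa.
    print(shear_stress(stress=0, tangential_force=25, area=5))  # ("stress", 5.0)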
| 188
| 0
|
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
| 350
|
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
def prepare_video():
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_file)
    return list(video)
@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
'''simple docstring'''


def solution(n: int = 100) -> int:
    """Count the distinct terms of a**b for 2 <= a <= n and 2 <= b <= n."""
    collect_powers = set()

    upper_limit = n + 1  # maximum limit
    for a in range(2, upper_limit):
        for b in range(2, upper_limit):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)


if __name__ == "__main__":
    print("Number of terms ", solution(int(str(input()).strip())))
'''simple docstring'''
from __future__ import annotations

from collections.abc import Callable
from typing import Any, Generic, TypeVar

T = TypeVar("T")


class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        # Iterative segment tree: leaves live at indices N..2N-1, internal
        # nodes at 1..N-1, so the backing list is padded with N placeholders.
        any_type: Any | T = None
        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        # Combine children bottom-up so every internal node holds fn(left, right).
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        # Overwrite leaf p, then recompute its ancestors up to the root.
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        # Fold fn over the inclusive index range [l, r].
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res


if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        '''Check every possible segment against a brute-force reduce.'''
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()
    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
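    # Usage sketch (illustrative): each query folds `fn` over O(log N) tree
    # nodes, so a full-range query stays cheap even after the point updates.
    print("min over full range:", min_segment_tree.query(0, len(test_array) - 1))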
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
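# A minimal sketch of the lazy pattern used above (illustrative only; the real
# `_LazyModule` in `transformers.utils` handles more cases): attribute access
# on the module object is what triggers the actual submodule import.
#
#     import importlib
#     import types
#
#     class _LazyDemo(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._class_to_module = {
#                 cls: mod for mod, classes in import_structure.items() for cls in classes
#             }
#
#         def __getattr__(self, attr):
#             # Import the owning submodule only on first access.
#             module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
#             return getattr(module, attr)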
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    """simple docstring"""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False
        )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_safe_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_safe_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_safe_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
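# Note on the `sld_*` kwargs exercised above (a summary, not extra test logic):
# `sld_guidance_scale=0` switches the Safe Latent Diffusion guidance off
# entirely, while the "strong" preset used here raises it to 2000 and adds
# warmup, thresholding and momentum terms to steer generations away from
# unsafe content.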
"""simple docstring"""
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
UpperCAmelCase__ : List[str] = {
'n_samples': 6_4,
'horizon': 3_2,
'num_inference_steps': 2_0,
'n_guide_steps': 2, # can set to 0 for faster sampling, does not use value network
'scale_grad_by_std': True,
'scale': 0.1,
'eta': 0.0,
't_grad_cutoff': 2,
'device': 'cpu',
}
if __name__ == "__main__":
UpperCAmelCase__ : Any = 'hopper-medium-v2'
UpperCAmelCase__ : Union[str, Any] = gym.make(env_name)
UpperCAmelCase__ : List[str] = ValueGuidedRLPipeline.from_pretrained(
'bglick13/hopper-medium-v2-value-function-hor32',
env=env,
)
env.seed(0)
UpperCAmelCase__ : str = env.reset()
UpperCAmelCase__ : List[Any] = 0
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Optional[Any] = 1_0_0_0
UpperCAmelCase__ : Union[str, Any] = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
UpperCAmelCase__ : Optional[Any] = pipeline(obs, planning_horizon=3_2)
# execute action in environment
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Any = env.step(denorm_actions)
UpperCAmelCase__ : Dict = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
f"""Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"""
f""" {total_score}"""
)
# save observations for rendering
rollout.append(next_observation.copy())
UpperCAmelCase__ : List[Any] = next_observation
except KeyboardInterrupt:
pass
print(f"""Total reward: {total_reward}""")
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}


class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self):
        return self.encoder_attention_heads

    @property
    def hidden_size(self):
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
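# Usage sketch (assumes a transformers version that ships DETA):
#
#     from transformers import DetaConfig, DetaModel
#
#     config = DetaConfig()           # default ResNet backbone, 900 object queries
#     model = DetaModel(config)       # randomly initialized weights
#     config_dict = config.to_dict()  # serializable; embeds backbone_config as a dict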
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        """simple docstring"""
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size, num_channels=self.num_channels, num_encoder_blocks=self.num_encoder_blocks, depths=self.depths, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)
@unittest.skip("""SegFormer does not use inputs_embeds""")
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
pass
@unittest.skip("""SegFormer does not have get_input_embeddings method and get_output_embeddings methods""")
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
pass
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(lowerCAmelCase_)
lowercase_ = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase_)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
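# Note (summary of the last test above): `post_process_semantic_segmentation`
# returns one label map per image; passing `target_sizes` resizes each map back
# to the requested (height, width), otherwise maps stay at logits resolution.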
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase : Optional[Any] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
            """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
            """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
        ]

        tgt_text = [
            "California's largest electricity provider has turned off power to hundreds of thousands of customers.",
            "Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
        ]

        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
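        # Note: the check above compares beam-search output to reference
        # summaries verbatim, so it is sensitive to the exact
        # "google/pegasus-xsum" checkpoint and tokenizer versions.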
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock

import torch
from accelerate.utils import write_basic_config

from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --seed=42
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --block_size 128
            --per_device_train_batch_size 5
            --per_device_eval_batch_size 5
            --num_train_epochs 2
            --output_dir {tmp_dir}
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --num_train_epochs=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_ner_no_trainer(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))

    @unittest.skip(reason="Fix me @muellerzr")
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --seed=42
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/swag/sample.json
            --validation_file tests/fixtures/tests_samples/swag/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=20
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
            --model_name_or_path sshleifer/student_marian_en_ro_6_1
            --source_lang en
            --target_lang ro
            --train_file tests/fixtures/tests_samples/wmt16/sample.json
            --validation_file tests/fixtures/tests_samples/wmt16/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --num_beams=6
            --learning_rate=3e-3
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --source_lang en_XX
            --target_lang ro_RO
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))

    @slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
            --dataset_name huggingface/semantic-segmentation-test-sample
            --output_dir {tmp_dir}
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
            --model_name_or_path google/vit-base-patch16-224-in21k
            --dataset_name hf-internal-testing/cats_vs_dogs_sample
            --learning_rate 1e-4
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 1
            --max_train_steps 2
            --train_val_split 0.1
            --seed 42
            --output_dir {tmp_dir}
            --with_tracking
            --checkpointing_steps 1
        """.split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
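# Hedged usage note (not part of the original test file): these accelerate
# example tests are normally collected by pytest, and the @slow-marked ones
# only run when RUN_SLOW is set. A typical invocation might look like:
#
#   RUN_SLOW=1 python -m pytest -sv examples/pytorch/test_accelerate_examples.py
#
# The file path above is an assumption about where this module lives in the
# transformers repository.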
| 123
|
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)

    benchmark = TensorFlowBenchmark(args=benchmark_args)
    benchmark.run()


if __name__ == "__main__":
    main()
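# Hedged usage sketch (not part of the original script): assuming this file is
# saved as run_benchmark_tf.py, a typical invocation could be:
#
#   python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128
#
# The flag names mirror fields of TensorFlowBenchmarkArguments; consult
# `python run_benchmark_tf.py --help` for the authoritative list.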
| 69
| 0
|
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Same tests as above, but with spaCy and ftfy available for pre-tokenization."""

    pass
| 357
|
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(
        self,
        value_function: UNet1DModel,
        unet: UNet1DModel,
        scheduler: DDPMScheduler,
        env,
    ):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        # overwrite the state part of the conditioned timesteps with the condition values
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad

                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)

            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create a batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
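# Hedged usage sketch (not part of the original file): the pipeline plans over
# trajectories for a d4rl/gym-style environment. Every name below is an
# illustrative assumption, not a checkpoint verified here.
#
#   env = gym.make("hopper-medium-v2")
#   pipeline = ValueGuidedRLPipeline(value_function=value_function, unet=unet, scheduler=scheduler, env=env)
#   obs = env.reset()
#   action = pipeline(obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1)
#   obs, reward, done, info = env.step(action)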
| 26
| 0
|
"""simple docstring"""
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
_UpperCamelCase : Any = logging.get_logger(__name__)
class BarkProcessor(ProcessorMixin):
    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings

    @classmethod
    def from_pretrained(
        cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs
    ):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exist, "
                    "no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json "
                    "dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`."
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)
        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)

    def save_pretrained(
        self,
        save_directory,
        speaker_embeddings_dict_path="speaker_embeddings_path.json",
        speaker_embeddings_directory="speaker_embeddings",
        push_to_hub=False,
        **kwargs,
    ):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True)

            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory

            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)

                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"], speaker_embeddings_directory, f"{prompt_key}_{key}"
                            ),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"{prompt_key}_{key}.npy")

                    embeddings_dict[prompt_key] = tmp_dict

            with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp:
                json.dump(embeddings_dict, fp)

        super().save_pretrained(save_directory, push_to_hub, **kwargs)

    def _load_voice_preset(self, voice_preset=None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]."
                )

            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f"`{os.path.join(self.speaker_embeddings.get('repo_or_path', '/'), voice_preset_paths[key])}` "
                    f"does not exist, no preloaded voice preset will be used - Make sure to provide correct paths "
                    f"to the {voice_preset} embeddings."
                )

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict

    def _validate_voice_preset_dict(self, voice_preset=None, **kwargs):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

    def __call__(
        self,
        text=None,
        voice_preset=None,
        return_tensors="pt",
        max_length=256,
        add_special_tokens=False,
        return_attention_mask=True,
        return_token_type_ids=False,
        **kwargs,
    ):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"
                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )

        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
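# Hedged usage sketch (not part of the original file): a BarkProcessor is
# normally paired with a Bark checkpoint; the repo id and voice preset below
# are illustrative assumptions.
#
#   processor = BarkProcessor.from_pretrained("suno/bark-small")
#   inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
#   # `inputs` then carries the tokenized text plus a "history_prompt" BatchFeature.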
| 220
|
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
| 121
| 0
|
'''simple docstring'''
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
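# Hedged usage sketch (not part of the original file): a few illustrative
# checks of the stack-based matcher above.
#
#   assert is_balanced("([]{})")        # properly nested
#   assert not is_balanced("([)]")      # interleaved pairs
#   assert not is_balanced("(")         # unclosed opener
#   assert is_balanced("no brackets")   # non-bracket characters are ignored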
| 67
|
'''simple docstring'''
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
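# Hedged usage sketch (not part of the original script): assuming this file is
# saved as convert_gpt2_original_tf_checkpoint_to_pytorch.py, a run could look like:
#
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path /path/to/tf_checkpoint \
#       --pytorch_dump_folder_path /path/to/pytorch_model
#
# --gpt2_config_file may be omitted, in which case a default GPT2Config is used.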
| 67
| 1
|